xref: /openbmc/linux/mm/hugetlb.c (revision 5e41540c)
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_owner.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
static bool __initdata parsed_valid_hugepagesz = true;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/*
	 * If no pages are used and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool.
	 */
	if (free) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

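/*
 * Illustrative example (not from this file): mounting hugetlbfs with
 * size and minimum-size limits creates a subpool roughly as follows,
 * here for a 10-page maximum and a 4-page minimum:
 *
 *	spool = hugepage_new_subpool(h, 10, 4);
 *
 * hugetlb_acct_memory(h, 4) reserves the minimum up front, so creation
 * fails (NULL) if the global pool cannot cover it.  A value of -1 for
 * max_hpages or min_hpages means "no limit" / "no minimum".
 */
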
void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different from the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock(&spool->lock);
	return ret;
}

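/*
 * Worked example (illustrative): spool->max_hpages == 10,
 * spool->min_hpages == 4 and spool->rsv_hpages == 4 (freshly created,
 * nothing in use).  hugepage_subpool_get_pages(spool, 6) then does:
 *   - maximum accounting: used_hpages 0 + 6 <= 10, so used_hpages = 6;
 *   - minimum accounting: delta 6 > rsv_hpages 4, so return 6 - 4 = 2
 *     and rsv_hpages drops to 0.
 * Only 2 of the 6 pages must come from the unreserved global pool; the
 * other 4 were already reserved when the subpool was created.
 */
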
/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different from the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return delta;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool);

	return ret;
}

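/*
 * Worked example (illustrative), continuing from the example above:
 * used_hpages == 6, rsv_hpages == 0, min_hpages == 4.
 * hugepage_subpool_put_pages(spool, 6) then does:
 *   - maximum accounting: used_hpages = 6 - 6 = 0;
 *   - minimum accounting: used_hpages 0 < min_hpages 4, and
 *     rsv_hpages 0 + delta 6 > 4, so return 0 + 6 - 4 = 2 and
 *     rsv_hpages is clamped back to the 4-page minimum.
 * Only 2 of the 6 freed pages release global reservations; the other 4
 * are retained to maintain the subpool minimum.
 */
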
static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

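/*
 * Illustrative map state used by the worked examples below: a region
 * list containing [0, 2) and [4, 6) covers huge page indices 0, 1, 4
 * and 5 (four pages), leaving indices 2 and 3 uncovered.
 */
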
/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  In the normal case, existing regions will be expanded
 * to accommodate the specified range.  Sufficient regions should
 * exist for expansion due to the previous call to region_chg
 * with the same range.  However, it is possible that region_del
 * could have been called after region_chg and modified the map
 * in such a way that no region exists to be expanded.  In this
 * case, pull a region descriptor from the cache associated with
 * the map and use that for the new range.
 *
 * Return the number of new huge pages added to the map.  This
 * number is greater than or equal to zero.
 */
static long region_add(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg, *trg;
	long add = 0;

	spin_lock(&resv->lock);
	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/*
	 * If no region exists which can be expanded to include the
	 * specified range, the list must have been modified by an
	 * interleaving call to region_del().  Pull a region descriptor
	 * from the cache and use it for this range.
	 */
	if (&rg->link == head || t < rg->from) {
		VM_BUG_ON(resv->region_cache_count <= 0);

		resv->region_cache_count--;
		nrg = list_first_entry(&resv->region_cache, struct file_region,
					link);
		list_del(&nrg->link);

		nrg->from = f;
		nrg->to = t;
		list_add(&nrg->link, rg->link.prev);

		add += t - f;
		goto out_locked;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/*
		 * If this area reaches higher, then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it.
		 */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			/*
			 * Decrement return value by the deleted range.
			 * Another range will span this area so that by
			 * the end of the routine add will be >= zero.
			 */
			add -= (rg->to - rg->from);
			list_del(&rg->link);
			kfree(rg);
		}
	}

	add += (nrg->from - f);		/* Added to beginning of region */
	nrg->from = f;
	add += t - nrg->to;		/* Added to end of region */
	nrg->to = t;

out_locked:
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
	VM_BUG_ON(add < 0);
	return add;
}

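/*
 * Worked example (illustrative): starting from the map {[0, 2), [4, 6)}
 * above, region_add(resv, 1, 5) first rounds f down to 0 (the range is
 * partially enclosed by [0, 2)), absorbs [4, 6) by extending t to 6 and
 * deleting that entry (add -= 2), then grows the surviving region to
 * [0, 6) (add += 0 at the front, add += 4 at the back).  The map becomes
 * {[0, 6)} and the function returns 2: exactly the two previously
 * uncovered indices, 2 and 3.
 */
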
/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  However, if the existing regions in the map cannot
 * be expanded to represent the new range, a new file_region
 * structure is added to the map as a placeholder.  This is
 * so that the subsequent region_add call will have all the
 * regions it needs and will not fail.
 *
 * Upon entry, region_chg will also examine the cache of region descriptors
 * associated with the map.  If there are not enough descriptors cached, one
 * will be allocated for the in progress add operation.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater than or
 * equal to zero.  -ENOMEM is returned if a new file_region structure or
 * cache entry is needed and cannot be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg = NULL;
	long chg = 0;

retry:
	spin_lock(&resv->lock);
retry_locked:
	resv->adds_in_progress++;

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations.
	 */
	if (resv->adds_in_progress > resv->region_cache_count) {
		struct file_region *trg;

		VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
		/* Must drop lock to allocate a new descriptor. */
		resv->adds_in_progress--;
		spin_unlock(&resv->lock);

		trg = kmalloc(sizeof(*trg), GFP_KERNEL);
		if (!trg) {
			kfree(nrg);
			return -ENOMEM;
		}

		spin_lock(&resv->lock);
		list_add(&trg->link, &resv->region_cache);
		resv->region_cache_count++;
		goto retry_locked;
	}

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/*
	 * If we are below the current region then a new region is required.
	 * Subtle: allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation.
	 */
	if (&rg->link == head || t < rg->from) {
		if (!nrg) {
			resv->adds_in_progress--;
			spin_unlock(&resv->lock);
			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
			if (!nrg)
				return -ENOMEM;

			nrg->from = f;
			nrg->to   = f;
			INIT_LIST_HEAD(&nrg->link);
			goto retry;
		}

		list_add(&nrg->link, rg->link.prev);
		chg = t - f;
		goto out_nrg;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			goto out;

		/*
		 * We overlap with this area; if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation.
		 */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}

out:
	spin_unlock(&resv->lock);
	/* We already know we raced and no longer need the new region */
	kfree(nrg);
	return chg;
out_nrg:
	spin_unlock(&resv->lock);
	return chg;
}

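/*
 * Worked example (illustrative): with the map {[0, 2), [4, 6)},
 * region_chg(resv, 1, 5) rounds f down to 0 (chg = 5), extends t to 6
 * when it meets [4, 6) (chg += 1), and subtracts both existing regions
 * (chg -= 2 twice), returning 2 without modifying the map.  A later
 * region_add(resv, 1, 5) performs the actual merge and returns the
 * same count.
 */
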
/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;
			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}

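/*
 * Worked example (illustrative): with the map {[0, 6)},
 * region_del(resv, 2, 4) hits the "must split" case: the original
 * entry is trimmed to [0, 2), a new descriptor [4, 6) is inserted
 * after it, and 2 is returned.  region_del(resv, 2, LONG_MAX) on the
 * same starting map would instead just trim the entry to [0, 2) and
 * return 4, never needing an allocation.
 */
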
/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust) {
		struct hstate *h = hstate_inode(inode);

		hugetlb_acct_memory(h, 1);
	}
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

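/*
 * Worked example (illustrative): with the map {[0, 2), [4, 6)},
 * region_count(resv, 1, 5) clips each region to the query range,
 * counting [1, 2) and [4, 5) for a total of 2 pages.
 */
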
/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

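/*
 * Worked example (illustrative, 2MB huge pages so huge_page_shift == 21):
 * an address at vm_start + 0x400000 in a mapping with vm_pgoff == 0
 * yields (0x400000 >> 21) + 0 == huge page index 2.  Note vm_pgoff is
 * kept in PAGE_SIZE units, hence the extra shift by huge_page_order()
 * to convert it to huge page units.
 */
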
pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * a reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	return inode->i_mapping->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because this allocated page will go
		 * into the page cache and be regarded as coming from the
		 * reserved pool in the releasing step.  Currently, we don't
		 * have any other solution to deal with this situation
		 * properly, so add this work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

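/*
 * Decision summary for vma_has_reserves() (illustrative recap of the
 * branches above):
 *
 *	VM_NORESERVE set:               true only if shared and chg == 0
 *	shared (VM_MAYSHARE):           true iff chg == 0
 *	private, HPAGE_RESV_OWNER set:  true iff chg == 0
 *	private, not the mmap() caller: false
 */
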
static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
	struct page *page;

	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
		if (!PageHWPoison(page))
			break;
	/*
	 * If no usable (non-poisoned) free hugepage is found on the
	 * list, the allocation fails.
	 */
	if (&h->hugepage_freelists[nid] == &page->lru)
		return NULL;
	list_move(&page->lru, &h->hugepage_activelist);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
		nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = -1;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct page *page;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * No need to ask again on the same node.  The pool is
		 * node rather than zone aware.
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		page = dequeue_huge_page_node_exact(h, node);
		if (page)
			return page;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_migration_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetPagePrivate(page);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return page;

err:
	return NULL;
}

/*
 * Common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * Returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advances the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * Helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

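/*
 * Illustrative use of the iterators above (a hypothetical sketch, not a
 * call site from this file): try each allowed node once, starting from
 * the round-robin cursor:
 *
 *	int nr_nodes, node;
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		if (try_alloc_on_node(h, node))	// hypothetical helper
 *			break;
 *	}
 *
 * Each iteration advances h->next_nid_to_alloc, so successive passes
 * interleave allocations across nodes rather than always starting at
 * the same node.
 */
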
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
					unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	atomic_set(compound_mapcount_ptr(page), 0);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		clear_compound_head(p);
		set_page_refcounted(p);
	}

	set_compound_order(page, 0);
	__ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
	free_contig_range(page_to_pfn(page), 1 << order);
}

static int __alloc_gigantic_page(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
}

static bool pfn_range_valid_gigantic(struct zone *z,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		if (!pfn_valid(i))
			return false;

		page = pfn_to_page(i);

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;

		if (page_count(page) > 0)
			return false;

		if (PageHuge(page))
			return false;
	}

	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;
	return zone_spans_pfn(zone, last_pfn);
}

static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	unsigned int order = huge_page_order(h);
	unsigned long nr_pages = 1 << order;
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
111979b63f12SMichal Hocko 		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
112079b63f12SMichal Hocko 			if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
1121944d9fecSLuiz Capitulino 				/*
1122944d9fecSLuiz Capitulino 				 * We release the zone lock here because
1123944d9fecSLuiz Capitulino 				 * alloc_contig_range() will also lock the zone
1124944d9fecSLuiz Capitulino 				 * at some point. If there's an allocation
1125944d9fecSLuiz Capitulino 				 * spinning on this lock, it may win the race
1126944d9fecSLuiz Capitulino 				 * and cause alloc_contig_range() to fail...
1127944d9fecSLuiz Capitulino 				 */
112879b63f12SMichal Hocko 				spin_unlock_irqrestore(&zone->lock, flags);
112979b63f12SMichal Hocko 				ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
1130944d9fecSLuiz Capitulino 				if (!ret)
1131944d9fecSLuiz Capitulino 					return pfn_to_page(pfn);
113279b63f12SMichal Hocko 				spin_lock_irqsave(&zone->lock, flags);
1133944d9fecSLuiz Capitulino 			}
1134944d9fecSLuiz Capitulino 			pfn += nr_pages;
1135944d9fecSLuiz Capitulino 		}
1136944d9fecSLuiz Capitulino 
113779b63f12SMichal Hocko 		spin_unlock_irqrestore(&zone->lock, flags);
1138944d9fecSLuiz Capitulino 	}
1139944d9fecSLuiz Capitulino 
1140944d9fecSLuiz Capitulino 	return NULL;
1141944d9fecSLuiz Capitulino }
1142944d9fecSLuiz Capitulino 
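/*
 * Worked example (illustrative, x86-64 with 4KB base pages): a 1GB
 * hstate has huge_page_order() == 18, so nr_pages == 1 << 18 == 262144.
 * Because pfn starts at the zone start rounded up to an nr_pages
 * boundary and then advances in nr_pages strides, only naturally
 * aligned 1GB candidate ranges are ever probed.
 */
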
1143944d9fecSLuiz Capitulino static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1144d00181b9SKirill A. Shutemov static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1145944d9fecSLuiz Capitulino 
1146e1073d1eSAneesh Kumar K.V #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1147944d9fecSLuiz Capitulino static inline bool gigantic_page_supported(void) { return false; }
1148d9cc948fSMichal Hocko static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1149d9cc948fSMichal Hocko 		int nid, nodemask_t *nodemask) { return NULL; }
1150d00181b9SKirill A. Shutemov static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1151944d9fecSLuiz Capitulino static inline void destroy_compound_gigantic_page(struct page *page,
1152d00181b9SKirill A. Shutemov 						unsigned int order) { }
1153944d9fecSLuiz Capitulino #endif
1154944d9fecSLuiz Capitulino 
1155a5516438SAndi Kleen static void update_and_free_page(struct hstate *h, struct page *page)
11566af2acb6SAdam Litke {
11576af2acb6SAdam Litke 	int i;
1158a5516438SAndi Kleen 
1159944d9fecSLuiz Capitulino 	if (hstate_is_gigantic(h) && !gigantic_page_supported())
1160944d9fecSLuiz Capitulino 		return;
116118229df5SAndy Whitcroft 
1162a5516438SAndi Kleen 	h->nr_huge_pages--;
1163a5516438SAndi Kleen 	h->nr_huge_pages_node[page_to_nid(page)]--;
1164a5516438SAndi Kleen 	for (i = 0; i < pages_per_huge_page(h); i++) {
116532f84528SChris Forbes 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
116632f84528SChris Forbes 				1 << PG_referenced | 1 << PG_dirty |
1167a7407a27SLuiz Capitulino 				1 << PG_active | 1 << PG_private |
1168a7407a27SLuiz Capitulino 				1 << PG_writeback);
11696af2acb6SAdam Litke 	}
1170309381feSSasha Levin 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1171f1e61557SKirill A. Shutemov 	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
11726af2acb6SAdam Litke 	set_page_refcounted(page);
1173944d9fecSLuiz Capitulino 	if (hstate_is_gigantic(h)) {
1174944d9fecSLuiz Capitulino 		destroy_compound_gigantic_page(page, huge_page_order(h));
1175944d9fecSLuiz Capitulino 		free_gigantic_page(page, huge_page_order(h));
1176944d9fecSLuiz Capitulino 	} else {
1177a5516438SAndi Kleen 		__free_pages(page, huge_page_order(h));
11786af2acb6SAdam Litke 	}
1179944d9fecSLuiz Capitulino }
11806af2acb6SAdam Litke 
1181e5ff2159SAndi Kleen struct hstate *size_to_hstate(unsigned long size)
1182e5ff2159SAndi Kleen {
1183e5ff2159SAndi Kleen 	struct hstate *h;
1184e5ff2159SAndi Kleen 
1185e5ff2159SAndi Kleen 	for_each_hstate(h) {
1186e5ff2159SAndi Kleen 		if (huge_page_size(h) == size)
1187e5ff2159SAndi Kleen 			return h;
1188e5ff2159SAndi Kleen 	}
1189e5ff2159SAndi Kleen 	return NULL;
1190e5ff2159SAndi Kleen }
1191e5ff2159SAndi Kleen 
1192bcc54222SNaoya Horiguchi /*
1193bcc54222SNaoya Horiguchi  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1194bcc54222SNaoya Horiguchi  * to hstate->hugepage_activelist).
1195bcc54222SNaoya Horiguchi  *
1196bcc54222SNaoya Horiguchi  * This function can be called for tail pages, but never returns true for them.
1197bcc54222SNaoya Horiguchi  */
1198bcc54222SNaoya Horiguchi bool page_huge_active(struct page *page)
1199bcc54222SNaoya Horiguchi {
1200bcc54222SNaoya Horiguchi 	VM_BUG_ON_PAGE(!PageHuge(page), page);
1201bcc54222SNaoya Horiguchi 	return PageHead(page) && PagePrivate(&page[1]);
1202bcc54222SNaoya Horiguchi }
1203bcc54222SNaoya Horiguchi 
1204bcc54222SNaoya Horiguchi /* never called for tail page */
1205bcc54222SNaoya Horiguchi static void set_page_huge_active(struct page *page)
1206bcc54222SNaoya Horiguchi {
1207bcc54222SNaoya Horiguchi 	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1208bcc54222SNaoya Horiguchi 	SetPagePrivate(&page[1]);
1209bcc54222SNaoya Horiguchi }
1210bcc54222SNaoya Horiguchi 
1211bcc54222SNaoya Horiguchi static void clear_page_huge_active(struct page *page)
1212bcc54222SNaoya Horiguchi {
1213bcc54222SNaoya Horiguchi 	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1214bcc54222SNaoya Horiguchi 	ClearPagePrivate(&page[1]);
1215bcc54222SNaoya Horiguchi }
1216bcc54222SNaoya Horiguchi 
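/*
 * Encoding note (not in the original source): the "active" state is
 * kept in the PG_private bit of the first tail page, so no head-page
 * flag is consumed; this is why the helpers above operate on page[1].
 */
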
1217ab5ac90aSMichal Hocko /*
1218ab5ac90aSMichal Hocko  * Internal hugetlb specific page flag. Do not use outside of the hugetlb
1219ab5ac90aSMichal Hocko  * code
1220ab5ac90aSMichal Hocko  */
1221ab5ac90aSMichal Hocko static inline bool PageHugeTemporary(struct page *page)
1222ab5ac90aSMichal Hocko {
1223ab5ac90aSMichal Hocko 	if (!PageHuge(page))
1224ab5ac90aSMichal Hocko 		return false;
1225ab5ac90aSMichal Hocko 
1226ab5ac90aSMichal Hocko 	return (unsigned long)page[2].mapping == -1U;
1227ab5ac90aSMichal Hocko }
1228ab5ac90aSMichal Hocko 
1229ab5ac90aSMichal Hocko static inline void SetPageHugeTemporary(struct page *page)
1230ab5ac90aSMichal Hocko {
1231ab5ac90aSMichal Hocko 	page[2].mapping = (void *)-1U;
1232ab5ac90aSMichal Hocko }
1233ab5ac90aSMichal Hocko 
1234ab5ac90aSMichal Hocko static inline void ClearPageHugeTemporary(struct page *page)
1235ab5ac90aSMichal Hocko {
1236ab5ac90aSMichal Hocko 	page[2].mapping = NULL;
1237ab5ac90aSMichal Hocko }
1238ab5ac90aSMichal Hocko 
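/*
 * Encoding note (not in the original source): like the activelist flag
 * above, the "temporary" state borrows otherwise-unused compound page
 * storage -- here the ->mapping field of the second tail page, set to
 * the sentinel (void *)-1U -- rather than consuming a page flag.
 */
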
12398f1d26d0SAtsushi Kumagai void free_huge_page(struct page *page)
124027a85ef1SDavid Gibson {
1241a5516438SAndi Kleen 	/*
1242a5516438SAndi Kleen 	 * Can't pass hstate in here because it is called from the
1243a5516438SAndi Kleen 	 * compound page destructor.
1244a5516438SAndi Kleen 	 */
1245e5ff2159SAndi Kleen 	struct hstate *h = page_hstate(page);
12467893d1d5SAdam Litke 	int nid = page_to_nid(page);
124790481622SDavid Gibson 	struct hugepage_subpool *spool =
124890481622SDavid Gibson 		(struct hugepage_subpool *)page_private(page);
124907443a85SJoonsoo Kim 	bool restore_reserve;
125027a85ef1SDavid Gibson 
1251e5df70abSAndy Whitcroft 	set_page_private(page, 0);
125223be7468SMel Gorman 	page->mapping = NULL;
1253b4330afbSMike Kravetz 	VM_BUG_ON_PAGE(page_count(page), page);
1254b4330afbSMike Kravetz 	VM_BUG_ON_PAGE(page_mapcount(page), page);
125507443a85SJoonsoo Kim 	restore_reserve = PagePrivate(page);
125616c794b4SJoonsoo Kim 	ClearPagePrivate(page);
125727a85ef1SDavid Gibson 
12581c5ecae3SMike Kravetz 	/*
12591c5ecae3SMike Kravetz 	 * A return code of zero implies that the subpool will be under its
12601c5ecae3SMike Kravetz 	 * minimum size if the reservation is not restored after the page
12611c5ecae3SMike Kravetz 	 * is freed.  Therefore, force the restore_reserve operation.
12621c5ecae3SMike Kravetz 	 */
12631c5ecae3SMike Kravetz 	if (hugepage_subpool_put_pages(spool, 1) == 0)
12641c5ecae3SMike Kravetz 		restore_reserve = true;
12651c5ecae3SMike Kravetz 
126627a85ef1SDavid Gibson 	spin_lock(&hugetlb_lock);
1267bcc54222SNaoya Horiguchi 	clear_page_huge_active(page);
12686d76dcf4SAneesh Kumar K.V 	hugetlb_cgroup_uncharge_page(hstate_index(h),
12696d76dcf4SAneesh Kumar K.V 				     pages_per_huge_page(h), page);
127007443a85SJoonsoo Kim 	if (restore_reserve)
127107443a85SJoonsoo Kim 		h->resv_huge_pages++;
127207443a85SJoonsoo Kim 
1273ab5ac90aSMichal Hocko 	if (PageHugeTemporary(page)) {
1274ab5ac90aSMichal Hocko 		list_del(&page->lru);
1275ab5ac90aSMichal Hocko 		ClearPageHugeTemporary(page);
1276ab5ac90aSMichal Hocko 		update_and_free_page(h, page);
1277ab5ac90aSMichal Hocko 	} else if (h->surplus_huge_pages_node[nid]) {
12780edaecfaSAneesh Kumar K.V 		/* remove the page from active list */
12790edaecfaSAneesh Kumar K.V 		list_del(&page->lru);
1280a5516438SAndi Kleen 		update_and_free_page(h, page);
1281a5516438SAndi Kleen 		h->surplus_huge_pages--;
1282a5516438SAndi Kleen 		h->surplus_huge_pages_node[nid]--;
12837893d1d5SAdam Litke 	} else {
12845d3a551cSWill Deacon 		arch_clear_hugepage_flags(page);
1285a5516438SAndi Kleen 		enqueue_huge_page(h, page);
12867893d1d5SAdam Litke 	}
128727a85ef1SDavid Gibson 	spin_unlock(&hugetlb_lock);
128827a85ef1SDavid Gibson }
128927a85ef1SDavid Gibson 
1290a5516438SAndi Kleen static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1291b7ba30c6SAndi Kleen {
12920edaecfaSAneesh Kumar K.V 	INIT_LIST_HEAD(&page->lru);
1293f1e61557SKirill A. Shutemov 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1294b7ba30c6SAndi Kleen 	spin_lock(&hugetlb_lock);
12959dd540e2SAneesh Kumar K.V 	set_hugetlb_cgroup(page, NULL);
1296a5516438SAndi Kleen 	h->nr_huge_pages++;
1297a5516438SAndi Kleen 	h->nr_huge_pages_node[nid]++;
1298b7ba30c6SAndi Kleen 	spin_unlock(&hugetlb_lock);
1299b7ba30c6SAndi Kleen }
1300b7ba30c6SAndi Kleen 
1301d00181b9SKirill A. Shutemov static void prep_compound_gigantic_page(struct page *page, unsigned int order)
130220a0307cSWu Fengguang {
130320a0307cSWu Fengguang 	int i;
130420a0307cSWu Fengguang 	int nr_pages = 1 << order;
130520a0307cSWu Fengguang 	struct page *p = page + 1;
130620a0307cSWu Fengguang 
130720a0307cSWu Fengguang 	/* we rely on prep_new_huge_page to set the destructor */
130820a0307cSWu Fengguang 	set_compound_order(page, order);
1309ef5a22beSAndrea Arcangeli 	__ClearPageReserved(page);
1310de09d31dSKirill A. Shutemov 	__SetPageHead(page);
131120a0307cSWu Fengguang 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1312ef5a22beSAndrea Arcangeli 		/*
1313ef5a22beSAndrea Arcangeli 		 * For gigantic hugepages allocated through bootmem at
1314ef5a22beSAndrea Arcangeli 		 * boot, it's safer to be consistent with the not-gigantic
1315ef5a22beSAndrea Arcangeli 		 * hugepages and clear the PG_reserved bit from all tail pages
1316ef5a22beSAndrea Arcangeli 		 * too.  Otherwise drivers using get_user_pages() to access tail
1317ef5a22beSAndrea Arcangeli 		 * pages may get the reference counting wrong if they see
1318ef5a22beSAndrea Arcangeli 		 * PG_reserved set on a tail page (despite the head page not
1319ef5a22beSAndrea Arcangeli 		 * having PG_reserved set).  Enforcing this consistency between
1320ef5a22beSAndrea Arcangeli 		 * head and tail pages allows drivers to optimize away a check
1321ef5a22beSAndrea Arcangeli 		 * on the head page when they need to know if put_page() is needed
1322ef5a22beSAndrea Arcangeli 		 * after get_user_pages().
1323ef5a22beSAndrea Arcangeli 		 */
1324ef5a22beSAndrea Arcangeli 		__ClearPageReserved(p);
132558a84aa9SYouquan Song 		set_page_count(p, 0);
13261d798ca3SKirill A. Shutemov 		set_compound_head(p, page);
132720a0307cSWu Fengguang 	}
1328b4330afbSMike Kravetz 	atomic_set(compound_mapcount_ptr(page), -1);
132920a0307cSWu Fengguang }
133020a0307cSWu Fengguang 
13317795912cSAndrew Morton /*
13327795912cSAndrew Morton  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
13337795912cSAndrew Morton  * transparent huge pages.  See the PageTransHuge() documentation for more
13347795912cSAndrew Morton  * details.
13357795912cSAndrew Morton  */
133620a0307cSWu Fengguang int PageHuge(struct page *page)
133720a0307cSWu Fengguang {
133820a0307cSWu Fengguang 	if (!PageCompound(page))
133920a0307cSWu Fengguang 		return 0;
134020a0307cSWu Fengguang 
134120a0307cSWu Fengguang 	page = compound_head(page);
1342f1e61557SKirill A. Shutemov 	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
134320a0307cSWu Fengguang }
134443131e14SNaoya Horiguchi EXPORT_SYMBOL_GPL(PageHuge);
134543131e14SNaoya Horiguchi 
134627c73ae7SAndrea Arcangeli /*
134727c73ae7SAndrea Arcangeli  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
134827c73ae7SAndrea Arcangeli  * normal or transparent huge pages.
134927c73ae7SAndrea Arcangeli  */
135027c73ae7SAndrea Arcangeli int PageHeadHuge(struct page *page_head)
135127c73ae7SAndrea Arcangeli {
135227c73ae7SAndrea Arcangeli 	if (!PageHead(page_head))
135327c73ae7SAndrea Arcangeli 		return 0;
135427c73ae7SAndrea Arcangeli 
1355758f66a2SAndrew Morton 	return get_compound_page_dtor(page_head) == free_huge_page;
135627c73ae7SAndrea Arcangeli }
135727c73ae7SAndrea Arcangeli 
135813d60f4bSZhang Yi pgoff_t __basepage_index(struct page *page)
135913d60f4bSZhang Yi {
136013d60f4bSZhang Yi 	struct page *page_head = compound_head(page);
136113d60f4bSZhang Yi 	pgoff_t index = page_index(page_head);
136213d60f4bSZhang Yi 	unsigned long compound_idx;
136313d60f4bSZhang Yi 
136413d60f4bSZhang Yi 	if (!PageHuge(page_head))
136513d60f4bSZhang Yi 		return page_index(page);
136613d60f4bSZhang Yi 
136713d60f4bSZhang Yi 	if (compound_order(page_head) >= MAX_ORDER)
136813d60f4bSZhang Yi 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
136913d60f4bSZhang Yi 	else
137013d60f4bSZhang Yi 		compound_idx = page - page_head;
137113d60f4bSZhang Yi 
137213d60f4bSZhang Yi 	return (index << compound_order(page_head)) + compound_idx;
137313d60f4bSZhang Yi }
137413d60f4bSZhang Yi 
13750c397daeSMichal Hocko static struct page *alloc_buddy_huge_page(struct hstate *h,
1376af0fb9dfSMichal Hocko 		gfp_t gfp_mask, int nid, nodemask_t *nmask)
13771da177e4SLinus Torvalds {
1378af0fb9dfSMichal Hocko 	int order = huge_page_order(h);
13791da177e4SLinus Torvalds 	struct page *page;
1380f96efd58SJoe Jin 
1381af0fb9dfSMichal Hocko 	gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
1382af0fb9dfSMichal Hocko 	if (nid == NUMA_NO_NODE)
1383af0fb9dfSMichal Hocko 		nid = numa_mem_id();
1384af0fb9dfSMichal Hocko 	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1385af0fb9dfSMichal Hocko 	if (page)
1386af0fb9dfSMichal Hocko 		__count_vm_event(HTLB_BUDDY_PGALLOC);
1387af0fb9dfSMichal Hocko 	else
1388af0fb9dfSMichal Hocko 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
138963b4613cSNishanth Aravamudan 
139063b4613cSNishanth Aravamudan 	return page;
139163b4613cSNishanth Aravamudan }
139263b4613cSNishanth Aravamudan 
1393af0fb9dfSMichal Hocko /*
13940c397daeSMichal Hocko  * Common helper to allocate a fresh hugetlb page. All specific allocators
13950c397daeSMichal Hocko  * should use this function to get new hugetlb pages.
13960c397daeSMichal Hocko  */
13970c397daeSMichal Hocko static struct page *alloc_fresh_huge_page(struct hstate *h,
13980c397daeSMichal Hocko 		gfp_t gfp_mask, int nid, nodemask_t *nmask)
13990c397daeSMichal Hocko {
14000c397daeSMichal Hocko 	struct page *page;
14010c397daeSMichal Hocko 
14020c397daeSMichal Hocko 	if (hstate_is_gigantic(h))
14030c397daeSMichal Hocko 		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
14040c397daeSMichal Hocko 	else
14050c397daeSMichal Hocko 		page = alloc_buddy_huge_page(h, gfp_mask,
14060c397daeSMichal Hocko 				nid, nmask);
14070c397daeSMichal Hocko 	if (!page)
14080c397daeSMichal Hocko 		return NULL;
14090c397daeSMichal Hocko 
14100c397daeSMichal Hocko 	if (hstate_is_gigantic(h))
14110c397daeSMichal Hocko 		prep_compound_gigantic_page(page, huge_page_order(h));
14120c397daeSMichal Hocko 	prep_new_huge_page(h, page, page_to_nid(page));
14130c397daeSMichal Hocko 
14140c397daeSMichal Hocko 	return page;
14150c397daeSMichal Hocko }
14160c397daeSMichal Hocko 
14170c397daeSMichal Hocko /*
1418af0fb9dfSMichal Hocko  * Allocates a fresh page to the hugetlb allocator pool in a node-interleaved
1419af0fb9dfSMichal Hocko  * manner.
1420af0fb9dfSMichal Hocko  */
14210c397daeSMichal Hocko static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1422b2261026SJoonsoo Kim {
1423b2261026SJoonsoo Kim 	struct page *page;
1424b2261026SJoonsoo Kim 	int nr_nodes, node;
1425af0fb9dfSMichal Hocko 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1426b2261026SJoonsoo Kim 
1427b2261026SJoonsoo Kim 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
14280c397daeSMichal Hocko 		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
1429af0fb9dfSMichal Hocko 		if (page)
1430b2261026SJoonsoo Kim 			break;
1431b2261026SJoonsoo Kim 	}
1432b2261026SJoonsoo Kim 
1433af0fb9dfSMichal Hocko 	if (!page)
1434af0fb9dfSMichal Hocko 		return 0;
1435b2261026SJoonsoo Kim 
1436af0fb9dfSMichal Hocko 	put_page(page); /* free it into the hugepage allocator */
1437af0fb9dfSMichal Hocko 
1438af0fb9dfSMichal Hocko 	return 1;
1439b2261026SJoonsoo Kim }
1440b2261026SJoonsoo Kim 
1441e8c5c824SLee Schermerhorn /*
1442e8c5c824SLee Schermerhorn  * Free a huge page from the pool, taking it from the next node to free.
1443e8c5c824SLee Schermerhorn  * Attempt to keep persistent huge pages more or less
1444e8c5c824SLee Schermerhorn  * balanced over allowed nodes.
1445e8c5c824SLee Schermerhorn  * Called with hugetlb_lock locked.
1446e8c5c824SLee Schermerhorn  */
14476ae11b27SLee Schermerhorn static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
14486ae11b27SLee Schermerhorn 							 bool acct_surplus)
1449e8c5c824SLee Schermerhorn {
1450b2261026SJoonsoo Kim 	int nr_nodes, node;
1451e8c5c824SLee Schermerhorn 	int ret = 0;
1452e8c5c824SLee Schermerhorn 
1453b2261026SJoonsoo Kim 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1454685f3457SLee Schermerhorn 		/*
1455685f3457SLee Schermerhorn 		 * If we're returning unused surplus pages, only examine
1456685f3457SLee Schermerhorn 		 * nodes with surplus pages.
1457685f3457SLee Schermerhorn 		 */
1458b2261026SJoonsoo Kim 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1459b2261026SJoonsoo Kim 		    !list_empty(&h->hugepage_freelists[node])) {
1460e8c5c824SLee Schermerhorn 			struct page *page =
1461b2261026SJoonsoo Kim 				list_entry(h->hugepage_freelists[node].next,
1462e8c5c824SLee Schermerhorn 					  struct page, lru);
1463e8c5c824SLee Schermerhorn 			list_del(&page->lru);
1464e8c5c824SLee Schermerhorn 			h->free_huge_pages--;
1465b2261026SJoonsoo Kim 			h->free_huge_pages_node[node]--;
1466685f3457SLee Schermerhorn 			if (acct_surplus) {
1467685f3457SLee Schermerhorn 				h->surplus_huge_pages--;
1468b2261026SJoonsoo Kim 				h->surplus_huge_pages_node[node]--;
1469685f3457SLee Schermerhorn 			}
1470e8c5c824SLee Schermerhorn 			update_and_free_page(h, page);
1471e8c5c824SLee Schermerhorn 			ret = 1;
14729a76db09SLee Schermerhorn 			break;
1473e8c5c824SLee Schermerhorn 		}
1474b2261026SJoonsoo Kim 	}
1475e8c5c824SLee Schermerhorn 
1476e8c5c824SLee Schermerhorn 	return ret;
1477e8c5c824SLee Schermerhorn }
1478e8c5c824SLee Schermerhorn 
1479c8721bbbSNaoya Horiguchi /*
1480c8721bbbSNaoya Horiguchi  * Dissolve a given free hugepage into free buddy pages. This function does
1481082d5b6bSGerald Schaefer  * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
14826bc9b564SNaoya Horiguchi  * dissolution fails because a given page is not a free hugepage, or because
14836bc9b564SNaoya Horiguchi  * free hugepages are fully reserved.
1484c8721bbbSNaoya Horiguchi  */
1485c3114a84SAnshuman Khandual int dissolve_free_huge_page(struct page *page)
1486c8721bbbSNaoya Horiguchi {
14876bc9b564SNaoya Horiguchi 	int rc = -EBUSY;
1488082d5b6bSGerald Schaefer 
1489c8721bbbSNaoya Horiguchi 	spin_lock(&hugetlb_lock);
1490c8721bbbSNaoya Horiguchi 	if (PageHuge(page) && !page_count(page)) {
14912247bb33SGerald Schaefer 		struct page *head = compound_head(page);
14922247bb33SGerald Schaefer 		struct hstate *h = page_hstate(head);
14932247bb33SGerald Schaefer 		int nid = page_to_nid(head);
14946bc9b564SNaoya Horiguchi 		if (h->free_huge_pages - h->resv_huge_pages == 0)
1495082d5b6bSGerald Schaefer 			goto out;
1496c3114a84SAnshuman Khandual 		/*
1497c3114a84SAnshuman Khandual 		 * Move PageHWPoison flag from head page to the raw error page,
1498c3114a84SAnshuman Khandual 		 * which makes the subpages other than the error page reusable.
1499c3114a84SAnshuman Khandual 		 */
1500c3114a84SAnshuman Khandual 		if (PageHWPoison(head) && page != head) {
1501c3114a84SAnshuman Khandual 			SetPageHWPoison(page);
1502c3114a84SAnshuman Khandual 			ClearPageHWPoison(head);
1503c3114a84SAnshuman Khandual 		}
15042247bb33SGerald Schaefer 		list_del(&head->lru);
1505c8721bbbSNaoya Horiguchi 		h->free_huge_pages--;
1506c8721bbbSNaoya Horiguchi 		h->free_huge_pages_node[nid]--;
1507c1470b33Szhong jiang 		h->max_huge_pages--;
15082247bb33SGerald Schaefer 		update_and_free_page(h, head);
15096bc9b564SNaoya Horiguchi 		rc = 0;
1510c8721bbbSNaoya Horiguchi 	}
1511082d5b6bSGerald Schaefer out:
1512c8721bbbSNaoya Horiguchi 	spin_unlock(&hugetlb_lock);
1513082d5b6bSGerald Schaefer 	return rc;
1514c8721bbbSNaoya Horiguchi }
1515c8721bbbSNaoya Horiguchi 
1516c8721bbbSNaoya Horiguchi /*
1517c8721bbbSNaoya Horiguchi  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1518c8721bbbSNaoya Horiguchi  * make specified memory blocks removable from the system.
15192247bb33SGerald Schaefer  * Note that this will dissolve a free gigantic hugepage completely, if any
15202247bb33SGerald Schaefer  * part of it lies within the given range.
1521082d5b6bSGerald Schaefer  * Also note that if dissolve_free_huge_page() returns with an error, all
1522082d5b6bSGerald Schaefer  * free hugepages that were dissolved before that error are lost.
1523c8721bbbSNaoya Horiguchi  */
1524082d5b6bSGerald Schaefer int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1525c8721bbbSNaoya Horiguchi {
1526c8721bbbSNaoya Horiguchi 	unsigned long pfn;
1527eb03aa00SGerald Schaefer 	struct page *page;
1528082d5b6bSGerald Schaefer 	int rc = 0;
1529c8721bbbSNaoya Horiguchi 
1530d0177639SLi Zhong 	if (!hugepages_supported())
1531082d5b6bSGerald Schaefer 		return rc;
1532d0177639SLi Zhong 
1533eb03aa00SGerald Schaefer 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1534eb03aa00SGerald Schaefer 		page = pfn_to_page(pfn);
1535eb03aa00SGerald Schaefer 		if (PageHuge(page) && !page_count(page)) {
1536eb03aa00SGerald Schaefer 			rc = dissolve_free_huge_page(page);
1537eb03aa00SGerald Schaefer 			if (rc)
1538082d5b6bSGerald Schaefer 				break;
1539eb03aa00SGerald Schaefer 		}
1540eb03aa00SGerald Schaefer 	}
1541082d5b6bSGerald Schaefer 
1542082d5b6bSGerald Schaefer 	return rc;
1543c8721bbbSNaoya Horiguchi }
1544c8721bbbSNaoya Horiguchi 
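/*
 * Worked example (illustrative): with a 2MB minimum hstate on x86-64
 * (4KB base pages), minimum_order == 9, so the loop above probes every
 * 512th pfn -- one candidate head page per smallest possible hugepage.
 */
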
1545ab5ac90aSMichal Hocko /*
1546ab5ac90aSMichal Hocko  * Allocates a fresh surplus page from the page allocator.
1547ab5ac90aSMichal Hocko  */
15480c397daeSMichal Hocko static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1549aaf14e40SMichal Hocko 		int nid, nodemask_t *nmask)
15507893d1d5SAdam Litke {
15519980d744SMichal Hocko 	struct page *page = NULL;
15527893d1d5SAdam Litke 
1553bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
1554aa888a74SAndi Kleen 		return NULL;
1555aa888a74SAndi Kleen 
1556d1c3fb1fSNishanth Aravamudan 	spin_lock(&hugetlb_lock);
15579980d744SMichal Hocko 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
15589980d744SMichal Hocko 		goto out_unlock;
1559d1c3fb1fSNishanth Aravamudan 	spin_unlock(&hugetlb_lock);
1560d1c3fb1fSNishanth Aravamudan 
15610c397daeSMichal Hocko 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
15629980d744SMichal Hocko 	if (!page)
15630c397daeSMichal Hocko 		return NULL;
1564d1c3fb1fSNishanth Aravamudan 
15657893d1d5SAdam Litke 	spin_lock(&hugetlb_lock);
15669980d744SMichal Hocko 	/*
15679980d744SMichal Hocko 	 * We could have raced with the pool size change.
15689980d744SMichal Hocko 	 * Double check that and simply deallocate the new page
15699980d744SMichal Hocko 	 * if we would end up overcommitting the surpluses. Abuse the
15709980d744SMichal Hocko 	 * temporary page mechanism to work around the nasty free_huge_page
15719980d744SMichal Hocko 	 * code flow.
15729980d744SMichal Hocko 	 */
15739980d744SMichal Hocko 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
15749980d744SMichal Hocko 		SetPageHugeTemporary(page);
15759980d744SMichal Hocko 		put_page(page);
15769980d744SMichal Hocko 		page = NULL;
15779980d744SMichal Hocko 	} else {
15789980d744SMichal Hocko 		h->surplus_huge_pages++;
15794704dea3SMichal Hocko 		h->surplus_huge_pages_node[page_to_nid(page)]++;
15807893d1d5SAdam Litke 	}
15819980d744SMichal Hocko 
15829980d744SMichal Hocko out_unlock:
1583d1c3fb1fSNishanth Aravamudan 	spin_unlock(&hugetlb_lock);
15847893d1d5SAdam Litke 
15857893d1d5SAdam Litke 	return page;
15867893d1d5SAdam Litke }
15877893d1d5SAdam Litke 
15880c397daeSMichal Hocko static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1589ab5ac90aSMichal Hocko 		int nid, nodemask_t *nmask)
1590ab5ac90aSMichal Hocko {
1591ab5ac90aSMichal Hocko 	struct page *page;
1592ab5ac90aSMichal Hocko 
1593ab5ac90aSMichal Hocko 	if (hstate_is_gigantic(h))
1594ab5ac90aSMichal Hocko 		return NULL;
1595ab5ac90aSMichal Hocko 
15960c397daeSMichal Hocko 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1597ab5ac90aSMichal Hocko 	if (!page)
1598ab5ac90aSMichal Hocko 		return NULL;
1599ab5ac90aSMichal Hocko 
1600ab5ac90aSMichal Hocko 	/*
1601ab5ac90aSMichal Hocko 	 * We do not account these pages as surplus because they are only
1602ab5ac90aSMichal Hocko 	 * temporary and will be released properly on the last reference.
1603ab5ac90aSMichal Hocko 	 */
1604ab5ac90aSMichal Hocko 	SetPageHugeTemporary(page);
1605ab5ac90aSMichal Hocko 
1606ab5ac90aSMichal Hocko 	return page;
1607ab5ac90aSMichal Hocko }
1608ab5ac90aSMichal Hocko 
1609e4e574b7SAdam Litke /*
1610099730d6SDave Hansen  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1611099730d6SDave Hansen  */
1612e0ec90eeSDave Hansen static
16130c397daeSMichal Hocko struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1614099730d6SDave Hansen 		struct vm_area_struct *vma, unsigned long addr)
1615099730d6SDave Hansen {
1616aaf14e40SMichal Hocko 	struct page *page;
1617aaf14e40SMichal Hocko 	struct mempolicy *mpol;
1618aaf14e40SMichal Hocko 	gfp_t gfp_mask = htlb_alloc_mask(h);
1619aaf14e40SMichal Hocko 	int nid;
1620aaf14e40SMichal Hocko 	nodemask_t *nodemask;
1621aaf14e40SMichal Hocko 
1622aaf14e40SMichal Hocko 	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
16230c397daeSMichal Hocko 	page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1624aaf14e40SMichal Hocko 	mpol_cond_put(mpol);
1625aaf14e40SMichal Hocko 
1626aaf14e40SMichal Hocko 	return page;
1627099730d6SDave Hansen }
1628099730d6SDave Hansen 
1629ab5ac90aSMichal Hocko /* page migration callback function */
1630bf50bab2SNaoya Horiguchi struct page *alloc_huge_page_node(struct hstate *h, int nid)
1631bf50bab2SNaoya Horiguchi {
1632aaf14e40SMichal Hocko 	gfp_t gfp_mask = htlb_alloc_mask(h);
16334ef91848SJoonsoo Kim 	struct page *page = NULL;
1634bf50bab2SNaoya Horiguchi 
1635aaf14e40SMichal Hocko 	if (nid != NUMA_NO_NODE)
1636aaf14e40SMichal Hocko 		gfp_mask |= __GFP_THISNODE;
1637aaf14e40SMichal Hocko 
1638bf50bab2SNaoya Horiguchi 	spin_lock(&hugetlb_lock);
16394ef91848SJoonsoo Kim 	if (h->free_huge_pages - h->resv_huge_pages > 0)
16403e59fcb0SMichal Hocko 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
1641bf50bab2SNaoya Horiguchi 	spin_unlock(&hugetlb_lock);
1642bf50bab2SNaoya Horiguchi 
164394ae8ba7SAneesh Kumar K.V 	if (!page)
16440c397daeSMichal Hocko 		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
1645bf50bab2SNaoya Horiguchi 
1646bf50bab2SNaoya Horiguchi 	return page;
1647bf50bab2SNaoya Horiguchi }
1648bf50bab2SNaoya Horiguchi 
1649ab5ac90aSMichal Hocko /* page migration callback function */
16503e59fcb0SMichal Hocko struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
16513e59fcb0SMichal Hocko 		nodemask_t *nmask)
16524db9b2efSMichal Hocko {
1653aaf14e40SMichal Hocko 	gfp_t gfp_mask = htlb_alloc_mask(h);
16544db9b2efSMichal Hocko 
16554db9b2efSMichal Hocko 	spin_lock(&hugetlb_lock);
16564db9b2efSMichal Hocko 	if (h->free_huge_pages - h->resv_huge_pages > 0) {
16573e59fcb0SMichal Hocko 		struct page *page;
16583e59fcb0SMichal Hocko 
16593e59fcb0SMichal Hocko 		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
16603e59fcb0SMichal Hocko 		if (page) {
16613e59fcb0SMichal Hocko 			spin_unlock(&hugetlb_lock);
16623e59fcb0SMichal Hocko 			return page;
16634db9b2efSMichal Hocko 		}
16644db9b2efSMichal Hocko 	}
16654db9b2efSMichal Hocko 	spin_unlock(&hugetlb_lock);
16664db9b2efSMichal Hocko 
16670c397daeSMichal Hocko 	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
16684db9b2efSMichal Hocko }
16694db9b2efSMichal Hocko 
1670ebd63723SMichal Hocko /* mempolicy aware migration callback */
1671389c8178SMichal Hocko struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1672389c8178SMichal Hocko 		unsigned long address)
1673ebd63723SMichal Hocko {
1674ebd63723SMichal Hocko 	struct mempolicy *mpol;
1675ebd63723SMichal Hocko 	nodemask_t *nodemask;
1676ebd63723SMichal Hocko 	struct page *page;
1677ebd63723SMichal Hocko 	gfp_t gfp_mask;
1678ebd63723SMichal Hocko 	int node;
1679ebd63723SMichal Hocko 
1680ebd63723SMichal Hocko 	gfp_mask = htlb_alloc_mask(h);
1681ebd63723SMichal Hocko 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1682ebd63723SMichal Hocko 	page = alloc_huge_page_nodemask(h, node, nodemask);
1683ebd63723SMichal Hocko 	mpol_cond_put(mpol);
1684ebd63723SMichal Hocko 
1685ebd63723SMichal Hocko 	return page;
1686ebd63723SMichal Hocko }
1687ebd63723SMichal Hocko 
1688bf50bab2SNaoya Horiguchi /*
168925985edcSLucas De Marchi  * Increase the hugetlb pool such that it can accommodate a reservation
1690e4e574b7SAdam Litke  * of size 'delta'.
1691e4e574b7SAdam Litke  */
1692a5516438SAndi Kleen static int gather_surplus_pages(struct hstate *h, int delta)
1693e4e574b7SAdam Litke {
1694e4e574b7SAdam Litke 	struct list_head surplus_list;
1695e4e574b7SAdam Litke 	struct page *page, *tmp;
1696e4e574b7SAdam Litke 	int ret, i;
1697e4e574b7SAdam Litke 	int needed, allocated;
169828073b02SHillf Danton 	bool alloc_ok = true;
1699e4e574b7SAdam Litke 
1700a5516438SAndi Kleen 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1701ac09b3a1SAdam Litke 	if (needed <= 0) {
1702a5516438SAndi Kleen 		h->resv_huge_pages += delta;
1703e4e574b7SAdam Litke 		return 0;
1704ac09b3a1SAdam Litke 	}
1705e4e574b7SAdam Litke 
1706e4e574b7SAdam Litke 	allocated = 0;
1707e4e574b7SAdam Litke 	INIT_LIST_HEAD(&surplus_list);
1708e4e574b7SAdam Litke 
1709e4e574b7SAdam Litke 	ret = -ENOMEM;
1710e4e574b7SAdam Litke retry:
1711e4e574b7SAdam Litke 	spin_unlock(&hugetlb_lock);
1712e4e574b7SAdam Litke 	for (i = 0; i < needed; i++) {
17130c397daeSMichal Hocko 		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
1714aaf14e40SMichal Hocko 				NUMA_NO_NODE, NULL);
171528073b02SHillf Danton 		if (!page) {
171628073b02SHillf Danton 			alloc_ok = false;
171728073b02SHillf Danton 			break;
171828073b02SHillf Danton 		}
1719e4e574b7SAdam Litke 		list_add(&page->lru, &surplus_list);
172069ed779aSDavid Rientjes 		cond_resched();
1721e4e574b7SAdam Litke 	}
172228073b02SHillf Danton 	allocated += i;
1723e4e574b7SAdam Litke 
1724e4e574b7SAdam Litke 	/*
1725e4e574b7SAdam Litke 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
1726e4e574b7SAdam Litke 	 * because either resv_huge_pages or free_huge_pages may have changed.
1727e4e574b7SAdam Litke 	 */
1728e4e574b7SAdam Litke 	spin_lock(&hugetlb_lock);
1729a5516438SAndi Kleen 	needed = (h->resv_huge_pages + delta) -
1730a5516438SAndi Kleen 			(h->free_huge_pages + allocated);
173128073b02SHillf Danton 	if (needed > 0) {
173228073b02SHillf Danton 		if (alloc_ok)
1733e4e574b7SAdam Litke 			goto retry;
173428073b02SHillf Danton 		/*
173528073b02SHillf Danton 		 * We were not able to allocate enough pages to
173628073b02SHillf Danton 		 * satisfy the entire reservation so we free what
173728073b02SHillf Danton 		 * we've allocated so far.
173828073b02SHillf Danton 		 */
173928073b02SHillf Danton 		goto free;
174028073b02SHillf Danton 	}
1741e4e574b7SAdam Litke 	/*
1742e4e574b7SAdam Litke 	 * The surplus_list now contains _at_least_ the number of extra pages
174325985edcSLucas De Marchi 	 * needed to accommodate the reservation.  Add the appropriate number
1744e4e574b7SAdam Litke 	 * of pages to the hugetlb pool and free the extras back to the buddy
1745ac09b3a1SAdam Litke 	 * allocator.  Commit the entire reservation here to prevent another
1746ac09b3a1SAdam Litke 	 * process from stealing the pages as they are added to the pool but
1747ac09b3a1SAdam Litke 	 * before they are reserved.
1748e4e574b7SAdam Litke 	 */
1749e4e574b7SAdam Litke 	needed += allocated;
1750a5516438SAndi Kleen 	h->resv_huge_pages += delta;
1751e4e574b7SAdam Litke 	ret = 0;
1752a9869b83SNaoya Horiguchi 
175319fc3f0aSAdam Litke 	/* Free the needed pages to the hugetlb pool */
175419fc3f0aSAdam Litke 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
175519fc3f0aSAdam Litke 		if ((--needed) < 0)
175619fc3f0aSAdam Litke 			break;
1757a9869b83SNaoya Horiguchi 		/*
1758a9869b83SNaoya Horiguchi 		 * This page is now managed by the hugetlb allocator and has
1759a9869b83SNaoya Horiguchi 		 * no users -- drop the buddy allocator's reference.
1760a9869b83SNaoya Horiguchi 		 */
1761a9869b83SNaoya Horiguchi 		put_page_testzero(page);
1762309381feSSasha Levin 		VM_BUG_ON_PAGE(page_count(page), page);
1763a5516438SAndi Kleen 		enqueue_huge_page(h, page);
176419fc3f0aSAdam Litke 	}
176528073b02SHillf Danton free:
1766b0365c8dSHillf Danton 	spin_unlock(&hugetlb_lock);
176719fc3f0aSAdam Litke 
176819fc3f0aSAdam Litke 	/* Free unnecessary surplus pages to the buddy allocator */
1769c0d934baSJoonsoo Kim 	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1770a9869b83SNaoya Horiguchi 		put_page(page);
177119fc3f0aSAdam Litke 	spin_lock(&hugetlb_lock);
1772e4e574b7SAdam Litke 
1773e4e574b7SAdam Litke 	return ret;
1774e4e574b7SAdam Litke }
1775e4e574b7SAdam Litke 
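/*
 * Illustrative arithmetic for the function above: with
 * free_huge_pages == 4, resv_huge_pages == 2 and delta == 3,
 * needed == (2 + 3) - 4 == 1, so a single surplus page is allocated
 * (barring races) before the whole reservation of 3 is committed.
 */
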
1776e4e574b7SAdam Litke /*
1777e5bbc8a6SMike Kravetz  * This routine has two main purposes:
1778e5bbc8a6SMike Kravetz  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1779e5bbc8a6SMike Kravetz  *    in unused_resv_pages.  This corresponds to the prior adjustments made
1780e5bbc8a6SMike Kravetz  *    to the associated reservation map.
1781e5bbc8a6SMike Kravetz  * 2) Free any unused surplus pages that may have been allocated to satisfy
1782e5bbc8a6SMike Kravetz  *    the reservation.  As many as unused_resv_pages may be freed.
1783e5bbc8a6SMike Kravetz  *
1784e5bbc8a6SMike Kravetz  * Called with hugetlb_lock held.  However, the lock could be dropped (and
1785e5bbc8a6SMike Kravetz  * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
1786e5bbc8a6SMike Kravetz  * we must make sure nobody else can claim pages we are in the process of
1787e5bbc8a6SMike Kravetz  * freeing.  Do this by ensuring resv_huge_pages is always greater than the
1788e5bbc8a6SMike Kravetz  * number of huge pages we plan to free when dropping the lock.
1789e4e574b7SAdam Litke  */
1790a5516438SAndi Kleen static void return_unused_surplus_pages(struct hstate *h,
1791a5516438SAndi Kleen 					unsigned long unused_resv_pages)
1792e4e574b7SAdam Litke {
1793e4e574b7SAdam Litke 	unsigned long nr_pages;
1794e4e574b7SAdam Litke 
1795aa888a74SAndi Kleen 	/* Cannot return gigantic pages currently */
1796bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
1797e5bbc8a6SMike Kravetz 		goto out;
1798aa888a74SAndi Kleen 
1799e5bbc8a6SMike Kravetz 	/*
1800e5bbc8a6SMike Kravetz 	 * Part (or even all) of the reservation could have been backed
1801e5bbc8a6SMike Kravetz 	 * by pre-allocated pages. Only free surplus pages.
1802e5bbc8a6SMike Kravetz 	 */
1803a5516438SAndi Kleen 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1804e4e574b7SAdam Litke 
1805685f3457SLee Schermerhorn 	/*
1806685f3457SLee Schermerhorn 	 * We want to release as many surplus pages as possible, spread
18079b5e5d0fSLee Schermerhorn 	 * evenly across all nodes with memory. Iterate across these nodes
18089b5e5d0fSLee Schermerhorn 	 * until we can no longer free unreserved surplus pages. This occurs
18099b5e5d0fSLee Schermerhorn 	 * when the nodes with surplus pages have no free pages.
18109b5e5d0fSLee Schermerhorn 	 * free_pool_huge_page() will balance the freed pages across the
18119b5e5d0fSLee Schermerhorn 	 * on-line nodes with memory and will handle the hstate accounting.
1812e5bbc8a6SMike Kravetz 	 *
1813e5bbc8a6SMike Kravetz 	 * Note that we decrement resv_huge_pages as we free the pages.  If
1814e5bbc8a6SMike Kravetz 	 * we drop the lock, resv_huge_pages will still be sufficiently large
1815e5bbc8a6SMike Kravetz 	 * to cover subsequent pages we may free.
1816685f3457SLee Schermerhorn 	 */
1817685f3457SLee Schermerhorn 	while (nr_pages--) {
1818e5bbc8a6SMike Kravetz 		h->resv_huge_pages--;
1819e5bbc8a6SMike Kravetz 		unused_resv_pages--;
18208cebfcd0SLai Jiangshan 		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1821e5bbc8a6SMike Kravetz 			goto out;
18227848a4bfSMizuma, Masayoshi 		cond_resched_lock(&hugetlb_lock);
1823e4e574b7SAdam Litke 	}
1824e5bbc8a6SMike Kravetz 
1825e5bbc8a6SMike Kravetz out:
1826e5bbc8a6SMike Kravetz 	/* Fully uncommit the reservation */
1827e5bbc8a6SMike Kravetz 	h->resv_huge_pages -= unused_resv_pages;
1828e4e574b7SAdam Litke }
1829e4e574b7SAdam Litke 
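/*
 * Illustrative example for the function above: with
 * unused_resv_pages == 10 and surplus_huge_pages == 4, at most four
 * pages are freed (resv_huge_pages dropping by one per freed page),
 * and the final "out:" adjustment uncommits the remaining six, so the
 * reservation still shrinks by the full ten.
 */
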
18305e911373SMike Kravetz 
1831c37f9fb1SAndy Whitcroft /*
1832feba16e2SMike Kravetz  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
18335e911373SMike Kravetz  * are used by the huge page allocation routines to manage reservations.
1834cf3ad20bSMike Kravetz  *
1835cf3ad20bSMike Kravetz  * vma_needs_reservation is called to determine if the huge page at addr
1836cf3ad20bSMike Kravetz  * within the vma has an associated reservation.  If a reservation is
1837cf3ad20bSMike Kravetz  * needed, the value 1 is returned.  The caller is then responsible for
1838cf3ad20bSMike Kravetz  * managing the global reservation and subpool usage counts.  After
1839cf3ad20bSMike Kravetz  * the huge page has been allocated, vma_commit_reservation is called
1840feba16e2SMike Kravetz  * to add the page to the reservation map.  If the page allocation fails,
1841feba16e2SMike Kravetz  * the reservation must be ended instead of committed.  vma_end_reservation
1842feba16e2SMike Kravetz  * is called in such cases.
1843cf3ad20bSMike Kravetz  *
1844cf3ad20bSMike Kravetz  * In the normal case, vma_commit_reservation returns the same value
1845cf3ad20bSMike Kravetz  * as the preceding vma_needs_reservation call.  The only time this
1846cf3ad20bSMike Kravetz  * is not the case is if a reserve map was changed between calls.  It
1847cf3ad20bSMike Kravetz  * is the responsibility of the caller to notice the difference and
1848cf3ad20bSMike Kravetz  * take appropriate action.
184996b96a96SMike Kravetz  *
185096b96a96SMike Kravetz  * vma_add_reservation is used in error paths where a reservation must
185196b96a96SMike Kravetz  * be restored when a newly allocated huge page must be freed.  It is
185296b96a96SMike Kravetz  * to be called after calling vma_needs_reservation to determine if a
185396b96a96SMike Kravetz  * reservation exists.
1854c37f9fb1SAndy Whitcroft  */
18555e911373SMike Kravetz enum vma_resv_mode {
18565e911373SMike Kravetz 	VMA_NEEDS_RESV,
18575e911373SMike Kravetz 	VMA_COMMIT_RESV,
1858feba16e2SMike Kravetz 	VMA_END_RESV,
185996b96a96SMike Kravetz 	VMA_ADD_RESV,
18605e911373SMike Kravetz };
1861cf3ad20bSMike Kravetz static long __vma_reservation_common(struct hstate *h,
1862cf3ad20bSMike Kravetz 				struct vm_area_struct *vma, unsigned long addr,
18635e911373SMike Kravetz 				enum vma_resv_mode mode)
1864c37f9fb1SAndy Whitcroft {
18654e35f483SJoonsoo Kim 	struct resv_map *resv;
18664e35f483SJoonsoo Kim 	pgoff_t idx;
1867cf3ad20bSMike Kravetz 	long ret;
1868c37f9fb1SAndy Whitcroft 
18694e35f483SJoonsoo Kim 	resv = vma_resv_map(vma);
18704e35f483SJoonsoo Kim 	if (!resv)
1871c37f9fb1SAndy Whitcroft 		return 1;
1872c37f9fb1SAndy Whitcroft 
18734e35f483SJoonsoo Kim 	idx = vma_hugecache_offset(h, vma, addr);
18745e911373SMike Kravetz 	switch (mode) {
18755e911373SMike Kravetz 	case VMA_NEEDS_RESV:
1876cf3ad20bSMike Kravetz 		ret = region_chg(resv, idx, idx + 1);
18775e911373SMike Kravetz 		break;
18785e911373SMike Kravetz 	case VMA_COMMIT_RESV:
18795e911373SMike Kravetz 		ret = region_add(resv, idx, idx + 1);
18805e911373SMike Kravetz 		break;
1881feba16e2SMike Kravetz 	case VMA_END_RESV:
18825e911373SMike Kravetz 		region_abort(resv, idx, idx + 1);
18835e911373SMike Kravetz 		ret = 0;
18845e911373SMike Kravetz 		break;
188596b96a96SMike Kravetz 	case VMA_ADD_RESV:
188696b96a96SMike Kravetz 		if (vma->vm_flags & VM_MAYSHARE)
188796b96a96SMike Kravetz 			ret = region_add(resv, idx, idx + 1);
188896b96a96SMike Kravetz 		else {
188996b96a96SMike Kravetz 			region_abort(resv, idx, idx + 1);
189096b96a96SMike Kravetz 			ret = region_del(resv, idx, idx + 1);
189196b96a96SMike Kravetz 		}
189296b96a96SMike Kravetz 		break;
18935e911373SMike Kravetz 	default:
18945e911373SMike Kravetz 		BUG();
18955e911373SMike Kravetz 	}
189684afd99bSAndy Whitcroft 
18974e35f483SJoonsoo Kim 	if (vma->vm_flags & VM_MAYSHARE)
1898cf3ad20bSMike Kravetz 		return ret;
189967961f9dSMike Kravetz 	else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
190067961f9dSMike Kravetz 		/*
190167961f9dSMike Kravetz 		 * In most cases, reserves always exist for private mappings.
190267961f9dSMike Kravetz 		 * However, the file associated with the mapping could have been
190367961f9dSMike Kravetz 		 * hole punched or truncated after reserves were consumed.
190467961f9dSMike Kravetz 		 * A subsequent fault on such a range will not use reserves.
190567961f9dSMike Kravetz 		 * Subtle - The reserve map for private mappings has the
190667961f9dSMike Kravetz 		 * opposite meaning than that of shared mappings.  If NO
190767961f9dSMike Kravetz 		 * opposite meaning from that of shared mappings.  If NO
190867961f9dSMike Kravetz 		 * If an entry exists in the reserve map, it means the
190967961f9dSMike Kravetz 		 * reservation has already been consumed.  As a result, the
191067961f9dSMike Kravetz 		 * return value of this routine is the opposite of the
191167961f9dSMike Kravetz 		 * value returned from reserve map manipulation routines above.
191267961f9dSMike Kravetz 		 */
191367961f9dSMike Kravetz 		if (ret)
191467961f9dSMike Kravetz 			return 0;
191567961f9dSMike Kravetz 		else
191667961f9dSMike Kravetz 			return 1;
191767961f9dSMike Kravetz 	}
19184e35f483SJoonsoo Kim 	else
1919cf3ad20bSMike Kravetz 		return ret < 0 ? ret : 0;
192084afd99bSAndy Whitcroft }
1921cf3ad20bSMike Kravetz 
1922cf3ad20bSMike Kravetz static long vma_needs_reservation(struct hstate *h,
1923a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long addr)
1924c37f9fb1SAndy Whitcroft {
19255e911373SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1926cf3ad20bSMike Kravetz }
1927c37f9fb1SAndy Whitcroft 
1928cf3ad20bSMike Kravetz static long vma_commit_reservation(struct hstate *h,
1929cf3ad20bSMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
1930cf3ad20bSMike Kravetz {
19315e911373SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
19325e911373SMike Kravetz }
19335e911373SMike Kravetz 
1934feba16e2SMike Kravetz static void vma_end_reservation(struct hstate *h,
19355e911373SMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
19365e911373SMike Kravetz {
1937feba16e2SMike Kravetz 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1938c37f9fb1SAndy Whitcroft }
1939c37f9fb1SAndy Whitcroft 
194096b96a96SMike Kravetz static long vma_add_reservation(struct hstate *h,
194196b96a96SMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
194296b96a96SMike Kravetz {
194396b96a96SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
194496b96a96SMike Kravetz }
194596b96a96SMike Kravetz 
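/*
 * Condensed calling-convention sketch for the helpers above (hedged;
 * alloc_huge_page() below is the canonical consumer):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	page = ...allocate the huge page...;
 *	if (page)
 *		(void)vma_commit_reservation(h, vma, addr);
 *	else
 *		vma_end_reservation(h, vma, addr);
 */
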
194696b96a96SMike Kravetz /*
194796b96a96SMike Kravetz  * This routine is called to restore a reservation on error paths.  In the
194896b96a96SMike Kravetz  * specific error paths, a huge page was allocated (via alloc_huge_page)
194996b96a96SMike Kravetz  * and is about to be freed.  If a reservation for the page existed,
195096b96a96SMike Kravetz  * alloc_huge_page would have consumed the reservation and set PagePrivate
195196b96a96SMike Kravetz  * in the newly allocated page.  When the page is freed via free_huge_page,
195296b96a96SMike Kravetz  * the global reservation count will be incremented if PagePrivate is set.
195396b96a96SMike Kravetz  * However, free_huge_page cannot adjust the reserve map.  Adjust the
195496b96a96SMike Kravetz  * reserve map here to be consistent with global reserve count adjustments
195596b96a96SMike Kravetz  * to be made by free_huge_page.
195696b96a96SMike Kravetz  */
195796b96a96SMike Kravetz static void restore_reserve_on_error(struct hstate *h,
195896b96a96SMike Kravetz 			struct vm_area_struct *vma, unsigned long address,
195996b96a96SMike Kravetz 			struct page *page)
196096b96a96SMike Kravetz {
196196b96a96SMike Kravetz 	if (unlikely(PagePrivate(page))) {
196296b96a96SMike Kravetz 		long rc = vma_needs_reservation(h, vma, address);
196396b96a96SMike Kravetz 
196496b96a96SMike Kravetz 		if (unlikely(rc < 0)) {
196596b96a96SMike Kravetz 			/*
196696b96a96SMike Kravetz 			 * Rare out of memory condition in reserve map
196796b96a96SMike Kravetz 			 * manipulation.  Clear PagePrivate so that
196896b96a96SMike Kravetz 			 * global reserve count will not be incremented
196996b96a96SMike Kravetz 			 * by free_huge_page.  This will make it appear
197096b96a96SMike Kravetz 			 * as though the reservation for this page was
197196b96a96SMike Kravetz 			 * consumed.  This may prevent the task from
197296b96a96SMike Kravetz 			 * faulting in the page at a later time.  This
197396b96a96SMike Kravetz 			 * is better than inconsistent global huge page
197496b96a96SMike Kravetz 			 * accounting of reserve counts.
197596b96a96SMike Kravetz 			 */
197696b96a96SMike Kravetz 			ClearPagePrivate(page);
197796b96a96SMike Kravetz 		} else if (rc) {
197896b96a96SMike Kravetz 			rc = vma_add_reservation(h, vma, address);
197996b96a96SMike Kravetz 			if (unlikely(rc < 0))
198096b96a96SMike Kravetz 				/*
198196b96a96SMike Kravetz 				 * See above comment about rare out of
198296b96a96SMike Kravetz 				 * memory condition.
198396b96a96SMike Kravetz 				 */
198496b96a96SMike Kravetz 				ClearPagePrivate(page);
198596b96a96SMike Kravetz 		} else
198696b96a96SMike Kravetz 			vma_end_reservation(h, vma, address);
198796b96a96SMike Kravetz 	}
198896b96a96SMike Kravetz }
198996b96a96SMike Kravetz 
199070c3547eSMike Kravetz struct page *alloc_huge_page(struct vm_area_struct *vma,
199104f2cbe3SMel Gorman 				    unsigned long addr, int avoid_reserve)
1992348ea204SAdam Litke {
199390481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_vma(vma);
1994a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
1995348ea204SAdam Litke 	struct page *page;
1996d85f69b0SMike Kravetz 	long map_chg, map_commit;
1997d85f69b0SMike Kravetz 	long gbl_chg;
19986d76dcf4SAneesh Kumar K.V 	int ret, idx;
19996d76dcf4SAneesh Kumar K.V 	struct hugetlb_cgroup *h_cg;
20002fc39cecSAdam Litke 
20016d76dcf4SAneesh Kumar K.V 	idx = hstate_index(h);
2002a1e78772SMel Gorman 	/*
2003d85f69b0SMike Kravetz 	 * Examine the region/reserve map to determine if the process
2004d85f69b0SMike Kravetz 	 * has a reservation for the page to be allocated.  A return
2005d85f69b0SMike Kravetz 	 * code of zero indicates a reservation exists (no change).
2006a1e78772SMel Gorman 	 */
2007d85f69b0SMike Kravetz 	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2008d85f69b0SMike Kravetz 	if (map_chg < 0)
200976dcee75SAneesh Kumar K.V 		return ERR_PTR(-ENOMEM);
2010d85f69b0SMike Kravetz 
2011d85f69b0SMike Kravetz 	/*
2012d85f69b0SMike Kravetz 	 * Processes that did not create the mapping will have no
2013d85f69b0SMike Kravetz 	 * reserves as indicated by the region/reserve map. Check
2014d85f69b0SMike Kravetz 	 * that the allocation will not exceed the subpool limit.
2015d85f69b0SMike Kravetz 	 * Allocations for MAP_NORESERVE mappings also need to be
2016d85f69b0SMike Kravetz 	 * checked against any subpool limit.
2017d85f69b0SMike Kravetz 	 */
2018d85f69b0SMike Kravetz 	if (map_chg || avoid_reserve) {
2019d85f69b0SMike Kravetz 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
2020d85f69b0SMike Kravetz 		if (gbl_chg < 0) {
2021feba16e2SMike Kravetz 			vma_end_reservation(h, vma, addr);
202276dcee75SAneesh Kumar K.V 			return ERR_PTR(-ENOSPC);
20235e911373SMike Kravetz 		}
202490d8b7e6SAdam Litke 
2025d85f69b0SMike Kravetz 		/*
2026d85f69b0SMike Kravetz 		 * Even though there was no reservation in the region/reserve
2027d85f69b0SMike Kravetz 		 * map, there could be reservations associated with the
2028d85f69b0SMike Kravetz 		 * subpool that can be used.  This would be indicated if the
2029d85f69b0SMike Kravetz 		 * return value of hugepage_subpool_get_pages() is zero.
2030d85f69b0SMike Kravetz 		 * However, if avoid_reserve is specified we still avoid even
2031d85f69b0SMike Kravetz 		 * the subpool reservations.
2032d85f69b0SMike Kravetz 		 */
2033d85f69b0SMike Kravetz 		if (avoid_reserve)
2034d85f69b0SMike Kravetz 			gbl_chg = 1;
2035d85f69b0SMike Kravetz 	}
2036d85f69b0SMike Kravetz 
20376d76dcf4SAneesh Kumar K.V 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
20388f34af6fSJianyu Zhan 	if (ret)
20398f34af6fSJianyu Zhan 		goto out_subpool_put;
20408f34af6fSJianyu Zhan 
2041a1e78772SMel Gorman 	spin_lock(&hugetlb_lock);
2042d85f69b0SMike Kravetz 	/*
2043d85f69b0SMike Kravetz 	 * gbl_chg is passed to indicate whether or not a page must be taken
2044d85f69b0SMike Kravetz 	 * from the global free pool (global change).  gbl_chg == 0 indicates
2045d85f69b0SMike Kravetz 	 * a reservation exists for the allocation.
2046d85f69b0SMike Kravetz 	 */
2047d85f69b0SMike Kravetz 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
204881a6fcaeSJoonsoo Kim 	if (!page) {
204994ae8ba7SAneesh Kumar K.V 		spin_unlock(&hugetlb_lock);
20500c397daeSMichal Hocko 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
20518f34af6fSJianyu Zhan 		if (!page)
20528f34af6fSJianyu Zhan 			goto out_uncharge_cgroup;
2053a88c7695SNaoya Horiguchi 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2054a88c7695SNaoya Horiguchi 			SetPagePrivate(page);
2055a88c7695SNaoya Horiguchi 			h->resv_huge_pages--;
2056a88c7695SNaoya Horiguchi 		}
205779dbb236SAneesh Kumar K.V 		spin_lock(&hugetlb_lock);
205879dbb236SAneesh Kumar K.V 		list_move(&page->lru, &h->hugepage_activelist);
205981a6fcaeSJoonsoo Kim 		/* Fall through */
2060a1e78772SMel Gorman 	}
206181a6fcaeSJoonsoo Kim 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
206281a6fcaeSJoonsoo Kim 	spin_unlock(&hugetlb_lock);
2063a1e78772SMel Gorman 
206490481622SDavid Gibson 	set_page_private(page, (unsigned long)spool);
2065a1e78772SMel Gorman 
2066d85f69b0SMike Kravetz 	map_commit = vma_commit_reservation(h, vma, addr);
2067d85f69b0SMike Kravetz 	if (unlikely(map_chg > map_commit)) {
206833039678SMike Kravetz 		/*
206933039678SMike Kravetz 		 * The page was added to the reservation map between
207033039678SMike Kravetz 		 * vma_needs_reservation and vma_commit_reservation.
207133039678SMike Kravetz 		 * This indicates a race with hugetlb_reserve_pages.
207233039678SMike Kravetz 		 * Adjust for the subpool count incremented above AND
207333039678SMike Kravetz 		 * in hugetlb_reserve_pages for the same page.  Also,
207433039678SMike Kravetz 		 * the reservation count added in hugetlb_reserve_pages
207533039678SMike Kravetz 		 * no longer applies.
207633039678SMike Kravetz 		 */
207733039678SMike Kravetz 		long rsv_adjust;
207833039678SMike Kravetz 
207933039678SMike Kravetz 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
208033039678SMike Kravetz 		hugetlb_acct_memory(h, -rsv_adjust);
208133039678SMike Kravetz 	}
20827893d1d5SAdam Litke 	return page;
20838f34af6fSJianyu Zhan 
20848f34af6fSJianyu Zhan out_uncharge_cgroup:
20858f34af6fSJianyu Zhan 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
20868f34af6fSJianyu Zhan out_subpool_put:
2087d85f69b0SMike Kravetz 	if (map_chg || avoid_reserve)
20888f34af6fSJianyu Zhan 		hugepage_subpool_put_pages(spool, 1);
2089feba16e2SMike Kravetz 	vma_end_reservation(h, vma, addr);
20908f34af6fSJianyu Zhan 	return ERR_PTR(-ENOSPC);
2091b45b5bd6SDavid Gibson }
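/*
 * Illustrative sketch, not kernel code: how the -ENOSPC path above is
 * seen from userspace.  A MAP_HUGETLB mapping that cannot reserve huge
 * pages typically fails at mmap() time with ENOMEM, while an unreserved
 * fault that loses the race for the last page is delivered as SIGBUS.
 * Minimal example, assuming a 2 MB default huge page size:
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;			/* one default huge page */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {			/* no huge pages available */
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(p, 0, len);			/* fault the huge page in */
	munmap(p, len);
	return 0;
}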
2092b45b5bd6SDavid Gibson 
2093e24a1307SAneesh Kumar K.V int alloc_bootmem_huge_page(struct hstate *h)
2094e24a1307SAneesh Kumar K.V 	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2095e24a1307SAneesh Kumar K.V int __alloc_bootmem_huge_page(struct hstate *h)
2096aa888a74SAndi Kleen {
2097aa888a74SAndi Kleen 	struct huge_bootmem_page *m;
2098b2261026SJoonsoo Kim 	int nr_nodes, node;
2099aa888a74SAndi Kleen 
2100b2261026SJoonsoo Kim 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2101aa888a74SAndi Kleen 		void *addr;
2102aa888a74SAndi Kleen 
2103eb31d559SMike Rapoport 		addr = memblock_alloc_try_nid_raw(
21048b89a116SGrygorii Strashko 				huge_page_size(h), huge_page_size(h),
210597ad1087SMike Rapoport 				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2106aa888a74SAndi Kleen 		if (addr) {
2107aa888a74SAndi Kleen 			/*
2108aa888a74SAndi Kleen 			 * Use the beginning of the huge page to store the
2109aa888a74SAndi Kleen 			 * huge_bootmem_page struct (until gather_bootmem
2110aa888a74SAndi Kleen 			 * puts them into the mem_map).
2111aa888a74SAndi Kleen 			 */
2112aa888a74SAndi Kleen 			m = addr;
2113aa888a74SAndi Kleen 			goto found;
2114aa888a74SAndi Kleen 		}
2115aa888a74SAndi Kleen 	}
2116aa888a74SAndi Kleen 	return 0;
2117aa888a74SAndi Kleen 
2118aa888a74SAndi Kleen found:
2119df994eadSLuiz Capitulino 	BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2120aa888a74SAndi Kleen 	/* Put them into a private list first because mem_map is not up yet */
2121330d6e48SCannon Matthews 	INIT_LIST_HEAD(&m->list);
2122aa888a74SAndi Kleen 	list_add(&m->list, &huge_boot_pages);
2123aa888a74SAndi Kleen 	m->hstate = h;
2124aa888a74SAndi Kleen 	return 1;
2125aa888a74SAndi Kleen }
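/*
 * Illustrative sketch, not kernel code: for_each_node_mask_to_alloc()
 * above walks the allowed nodes round-robin, resuming from the hstate's
 * persistent cursor (h->next_nid_to_alloc) so successive boot-time
 * allocations interleave across nodes.  A self-contained model of that
 * traversal (try_alloc_on_node() is a hypothetical stand-in):
 */
#include <stdbool.h>
#include <stdio.h>

static bool try_alloc_on_node(int nid)
{
	return nid == 2;		/* pretend only node 2 has memory */
}

static int alloc_round_robin(int *cursor, const int *nodes, int n)
{
	int start = *cursor;

	for (int i = 0; i < n; i++) {
		int nid = nodes[(start + i) % n];

		*cursor = (start + i + 1) % n;	/* resume after this node */
		if (try_alloc_on_node(nid))
			return nid;
	}
	return -1;			/* every allowed node failed */
}

int main(void)
{
	int nodes[] = { 0, 1, 2, 3 };
	int cursor = 0;

	/* Prints "allocated on node 2, cursor now 3". */
	printf("allocated on node %d, cursor now %d\n",
	       alloc_round_robin(&cursor, nodes, 4), cursor);
	return 0;
}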
2126aa888a74SAndi Kleen 
2127d00181b9SKirill A. Shutemov static void __init prep_compound_huge_page(struct page *page,
2128d00181b9SKirill A. Shutemov 		unsigned int order)
212918229df5SAndy Whitcroft {
213018229df5SAndy Whitcroft 	if (unlikely(order > (MAX_ORDER - 1)))
213118229df5SAndy Whitcroft 		prep_compound_gigantic_page(page, order);
213218229df5SAndy Whitcroft 	else
213318229df5SAndy Whitcroft 		prep_compound_page(page, order);
213418229df5SAndy Whitcroft }
213518229df5SAndy Whitcroft 
2136aa888a74SAndi Kleen /* Put bootmem huge pages into the standard lists after mem_map is up */
2137aa888a74SAndi Kleen static void __init gather_bootmem_prealloc(void)
2138aa888a74SAndi Kleen {
2139aa888a74SAndi Kleen 	struct huge_bootmem_page *m;
2140aa888a74SAndi Kleen 
2141aa888a74SAndi Kleen 	list_for_each_entry(m, &huge_boot_pages, list) {
214240d18ebfSMike Kravetz 		struct page *page = virt_to_page(m);
2143aa888a74SAndi Kleen 		struct hstate *h = m->hstate;
2144ee8f248dSBecky Bruce 
2145aa888a74SAndi Kleen 		WARN_ON(page_count(page) != 1);
214618229df5SAndy Whitcroft 		prep_compound_huge_page(page, h->order);
2147ef5a22beSAndrea Arcangeli 		WARN_ON(PageReserved(page));
2148aa888a74SAndi Kleen 		prep_new_huge_page(h, page, page_to_nid(page));
2149af0fb9dfSMichal Hocko 		put_page(page); /* free it into the hugepage allocator */
2150af0fb9dfSMichal Hocko 
2151b0320c7bSRafael Aquini 		/*
2152b0320c7bSRafael Aquini 		 * If we had gigantic hugepages allocated at boot time, we need
2153b0320c7bSRafael Aquini 		 * to restore the 'stolen' pages to totalram_pages in order to
2154b0320c7bSRafael Aquini 		 * fix confusing memory reports from free(1) and other
2155b0320c7bSRafael Aquini 		 * side effects, like CommitLimit going negative.
2156b0320c7bSRafael Aquini 		 */
2157bae7f4aeSLuiz Capitulino 		if (hstate_is_gigantic(h))
21583dcc0571SJiang Liu 			adjust_managed_page_count(page, 1 << h->order);
2159520495feSCannon Matthews 		cond_resched();
2160aa888a74SAndi Kleen 	}
2161aa888a74SAndi Kleen }
2162aa888a74SAndi Kleen 
21638faa8b07SAndi Kleen static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
21641da177e4SLinus Torvalds {
21651da177e4SLinus Torvalds 	unsigned long i;
21661da177e4SLinus Torvalds 
2167e5ff2159SAndi Kleen 	for (i = 0; i < h->max_huge_pages; ++i) {
2168bae7f4aeSLuiz Capitulino 		if (hstate_is_gigantic(h)) {
2169aa888a74SAndi Kleen 			if (!alloc_bootmem_huge_page(h))
2170aa888a74SAndi Kleen 				break;
21710c397daeSMichal Hocko 		} else if (!alloc_pool_huge_page(h,
21728cebfcd0SLai Jiangshan 					 &node_states[N_MEMORY]))
21731da177e4SLinus Torvalds 			break;
217469ed779aSDavid Rientjes 		cond_resched();
21751da177e4SLinus Torvalds 	}
2176d715cf80SLiam R. Howlett 	if (i < h->max_huge_pages) {
2177d715cf80SLiam R. Howlett 		char buf[32];
2178d715cf80SLiam R. Howlett 
2179c6247f72SMatthew Wilcox 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2180d715cf80SLiam R. Howlett 		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
2181d715cf80SLiam R. Howlett 			h->max_huge_pages, buf, i);
21828faa8b07SAndi Kleen 		h->max_huge_pages = i;
2183e5ff2159SAndi Kleen 	}
2184d715cf80SLiam R. Howlett }
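/*
 * With the format above, a partial boot-time allocation logs, for
 * example (values illustrative):
 *
 *   HugeTLB: allocating 512 of page size 2.00 MiB failed.  Only allocated 256 hugepages.
 */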
2185e5ff2159SAndi Kleen 
2186e5ff2159SAndi Kleen static void __init hugetlb_init_hstates(void)
2187e5ff2159SAndi Kleen {
2188e5ff2159SAndi Kleen 	struct hstate *h;
2189e5ff2159SAndi Kleen 
2190e5ff2159SAndi Kleen 	for_each_hstate(h) {
2191641844f5SNaoya Horiguchi 		if (minimum_order > huge_page_order(h))
2192641844f5SNaoya Horiguchi 			minimum_order = huge_page_order(h);
2193641844f5SNaoya Horiguchi 
21948faa8b07SAndi Kleen 		/* oversize hugepages were init'ed in early boot */
2195bae7f4aeSLuiz Capitulino 		if (!hstate_is_gigantic(h))
21968faa8b07SAndi Kleen 			hugetlb_hstate_alloc_pages(h);
2197e5ff2159SAndi Kleen 	}
2198641844f5SNaoya Horiguchi 	VM_BUG_ON(minimum_order == UINT_MAX);
2199e5ff2159SAndi Kleen }
2200e5ff2159SAndi Kleen 
2201e5ff2159SAndi Kleen static void __init report_hugepages(void)
2202e5ff2159SAndi Kleen {
2203e5ff2159SAndi Kleen 	struct hstate *h;
2204e5ff2159SAndi Kleen 
2205e5ff2159SAndi Kleen 	for_each_hstate(h) {
22064abd32dbSAndi Kleen 		char buf[32];
2207c6247f72SMatthew Wilcox 
2208c6247f72SMatthew Wilcox 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2209ffb22af5SAndrew Morton 		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2210c6247f72SMatthew Wilcox 			buf, h->free_huge_pages);
2211e5ff2159SAndi Kleen 	}
2212e5ff2159SAndi Kleen }
2213e5ff2159SAndi Kleen 
22141da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM
22156ae11b27SLee Schermerhorn static void try_to_free_low(struct hstate *h, unsigned long count,
22166ae11b27SLee Schermerhorn 						nodemask_t *nodes_allowed)
22171da177e4SLinus Torvalds {
22184415cc8dSChristoph Lameter 	int i;
22194415cc8dSChristoph Lameter 
2220bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
2221aa888a74SAndi Kleen 		return;
2222aa888a74SAndi Kleen 
22236ae11b27SLee Schermerhorn 	for_each_node_mask(i, *nodes_allowed) {
22241da177e4SLinus Torvalds 		struct page *page, *next;
2225a5516438SAndi Kleen 		struct list_head *freel = &h->hugepage_freelists[i];
2226a5516438SAndi Kleen 		list_for_each_entry_safe(page, next, freel, lru) {
2227a5516438SAndi Kleen 			if (count >= h->nr_huge_pages)
22286b0c880dSAdam Litke 				return;
22291da177e4SLinus Torvalds 			if (PageHighMem(page))
22301da177e4SLinus Torvalds 				continue;
22311da177e4SLinus Torvalds 			list_del(&page->lru);
2232e5ff2159SAndi Kleen 			update_and_free_page(h, page);
2233a5516438SAndi Kleen 			h->free_huge_pages--;
2234a5516438SAndi Kleen 			h->free_huge_pages_node[page_to_nid(page)]--;
22351da177e4SLinus Torvalds 		}
22361da177e4SLinus Torvalds 	}
22371da177e4SLinus Torvalds }
22381da177e4SLinus Torvalds #else
22396ae11b27SLee Schermerhorn static inline void try_to_free_low(struct hstate *h, unsigned long count,
22406ae11b27SLee Schermerhorn 						nodemask_t *nodes_allowed)
22411da177e4SLinus Torvalds {
22421da177e4SLinus Torvalds }
22431da177e4SLinus Torvalds #endif
22441da177e4SLinus Torvalds 
224520a0307cSWu Fengguang /*
224620a0307cSWu Fengguang  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
224720a0307cSWu Fengguang  * balanced by operating on them in a round-robin fashion.
224820a0307cSWu Fengguang  * Returns 1 if an adjustment was made.
224920a0307cSWu Fengguang  */
22506ae11b27SLee Schermerhorn static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
22516ae11b27SLee Schermerhorn 				int delta)
225220a0307cSWu Fengguang {
2253b2261026SJoonsoo Kim 	int nr_nodes, node;
225420a0307cSWu Fengguang 
225520a0307cSWu Fengguang 	VM_BUG_ON(delta != -1 && delta != 1);
225620a0307cSWu Fengguang 
2257e8c5c824SLee Schermerhorn 	if (delta < 0) {
2258b2261026SJoonsoo Kim 		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2259b2261026SJoonsoo Kim 			if (h->surplus_huge_pages_node[node])
2260b2261026SJoonsoo Kim 				goto found;
2261b2261026SJoonsoo Kim 		}
2262b2261026SJoonsoo Kim 	} else {
2263b2261026SJoonsoo Kim 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2264b2261026SJoonsoo Kim 			if (h->surplus_huge_pages_node[node] <
2265b2261026SJoonsoo Kim 					h->nr_huge_pages_node[node])
2266b2261026SJoonsoo Kim 				goto found;
2267e8c5c824SLee Schermerhorn 		}
22689a76db09SLee Schermerhorn 	}
2269b2261026SJoonsoo Kim 	return 0;
227020a0307cSWu Fengguang 
2271b2261026SJoonsoo Kim found:
227220a0307cSWu Fengguang 	h->surplus_huge_pages += delta;
2273b2261026SJoonsoo Kim 	h->surplus_huge_pages_node[node] += delta;
2274b2261026SJoonsoo Kim 	return 1;
227520a0307cSWu Fengguang }
227620a0307cSWu Fengguang 
2277a5516438SAndi Kleen #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
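/*
 * Worked example of the macro above: with nr_huge_pages = 10 and
 * surplus_huge_pages = 4, there are 6 persistent pages.  Growing the
 * pool to count = 8 in set_max_huge_pages() below first converts two
 * surplus pages to persistent ones before allocating anything fresh.
 */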
22786ae11b27SLee Schermerhorn static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
22796ae11b27SLee Schermerhorn 						nodemask_t *nodes_allowed)
22801da177e4SLinus Torvalds {
22817893d1d5SAdam Litke 	unsigned long min_count, ret;
22821da177e4SLinus Torvalds 
2283944d9fecSLuiz Capitulino 	if (hstate_is_gigantic(h) && !gigantic_page_supported())
2284aa888a74SAndi Kleen 		return h->max_huge_pages;
2285aa888a74SAndi Kleen 
22867893d1d5SAdam Litke 	/*
22877893d1d5SAdam Litke 	 * Increase the pool size
22887893d1d5SAdam Litke 	 * First take pages out of surplus state.  Then make up the
22897893d1d5SAdam Litke 	 * remaining difference by allocating fresh huge pages.
2290d1c3fb1fSNishanth Aravamudan 	 *
22910c397daeSMichal Hocko 	 * We might race with alloc_surplus_huge_page() here and be unable
2292d1c3fb1fSNishanth Aravamudan 	 * to convert a surplus huge page to a normal huge page. That is
2293d1c3fb1fSNishanth Aravamudan 	 * not critical, though, it just means the overall size of the
2294d1c3fb1fSNishanth Aravamudan 	 * pool might be one hugepage larger than it needs to be, but
2295d1c3fb1fSNishanth Aravamudan 	 * within all the constraints specified by the sysctls.
22967893d1d5SAdam Litke 	 */
22971da177e4SLinus Torvalds 	spin_lock(&hugetlb_lock);
2298a5516438SAndi Kleen 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
22996ae11b27SLee Schermerhorn 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
23007893d1d5SAdam Litke 			break;
23017893d1d5SAdam Litke 	}
23027893d1d5SAdam Litke 
2303a5516438SAndi Kleen 	while (count > persistent_huge_pages(h)) {
23047893d1d5SAdam Litke 		/*
23057893d1d5SAdam Litke 		 * If this allocation races such that we no longer need the
23067893d1d5SAdam Litke 		 * page, free_huge_page will handle it by freeing the page
23077893d1d5SAdam Litke 		 * and reducing the surplus.
23087893d1d5SAdam Litke 		 */
23097893d1d5SAdam Litke 		spin_unlock(&hugetlb_lock);
2310649920c6SJia He 
2311649920c6SJia He 		/* yield cpu to avoid soft lockup */
2312649920c6SJia He 		cond_resched();
2313649920c6SJia He 
23140c397daeSMichal Hocko 		ret = alloc_pool_huge_page(h, nodes_allowed);
23157893d1d5SAdam Litke 		spin_lock(&hugetlb_lock);
23167893d1d5SAdam Litke 		if (!ret)
23177893d1d5SAdam Litke 			goto out;
23187893d1d5SAdam Litke 
2319536240f2SMel Gorman 		/* Bail for signals. Probably ctrl-c from user */
2320536240f2SMel Gorman 		if (signal_pending(current))
2321536240f2SMel Gorman 			goto out;
23227893d1d5SAdam Litke 	}
23237893d1d5SAdam Litke 
23247893d1d5SAdam Litke 	/*
23257893d1d5SAdam Litke 	 * Decrease the pool size
23267893d1d5SAdam Litke 	 * First return free pages to the buddy allocator (being careful
23277893d1d5SAdam Litke 	 * to keep enough around to satisfy reservations).  Then place
23287893d1d5SAdam Litke 	 * pages into surplus state as needed so the pool will shrink
23297893d1d5SAdam Litke 	 * to the desired size as pages become free.
2330d1c3fb1fSNishanth Aravamudan 	 *
2331d1c3fb1fSNishanth Aravamudan 	 * By placing pages into the surplus state independent of the
2332d1c3fb1fSNishanth Aravamudan 	 * overcommit value, we are allowing the surplus pool size to
2333d1c3fb1fSNishanth Aravamudan 	 * exceed overcommit. There are few sane options here. Since
23340c397daeSMichal Hocko 	 * alloc_surplus_huge_page() is checking the global counter,
2335d1c3fb1fSNishanth Aravamudan 	 * though, we'll note that we're not allowed to exceed surplus
2336d1c3fb1fSNishanth Aravamudan 	 * and won't grow the pool anywhere else. Not until one of the
2337d1c3fb1fSNishanth Aravamudan 	 * sysctls is changed, or the surplus pages go out of use.
23387893d1d5SAdam Litke 	 */
2339a5516438SAndi Kleen 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
23406b0c880dSAdam Litke 	min_count = max(count, min_count);
23416ae11b27SLee Schermerhorn 	try_to_free_low(h, min_count, nodes_allowed);
2342a5516438SAndi Kleen 	while (min_count < persistent_huge_pages(h)) {
23436ae11b27SLee Schermerhorn 		if (!free_pool_huge_page(h, nodes_allowed, 0))
23441da177e4SLinus Torvalds 			break;
234555f67141SMizuma, Masayoshi 		cond_resched_lock(&hugetlb_lock);
23461da177e4SLinus Torvalds 	}
2347a5516438SAndi Kleen 	while (count < persistent_huge_pages(h)) {
23486ae11b27SLee Schermerhorn 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
23497893d1d5SAdam Litke 			break;
23507893d1d5SAdam Litke 	}
23517893d1d5SAdam Litke out:
2352a5516438SAndi Kleen 	ret = persistent_huge_pages(h);
23531da177e4SLinus Torvalds 	spin_unlock(&hugetlb_lock);
23547893d1d5SAdam Litke 	return ret;
23551da177e4SLinus Torvalds }
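/*
 * Illustrative sketch, not kernel code: set_max_huge_pages() is what
 * ultimately runs when the pool is resized through the procfs/sysfs
 * knobs wired up below.  Minimal userspace example; the kernel may
 * clamp the request, so read the file back to see the actual result:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/nr_hugepages", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "64\n");		/* request 64 default-size pages */
	fclose(f);
	return 0;
}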
23561da177e4SLinus Torvalds 
2357a3437870SNishanth Aravamudan #define HSTATE_ATTR_RO(_name) \
2358a3437870SNishanth Aravamudan 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2359a3437870SNishanth Aravamudan 
2360a3437870SNishanth Aravamudan #define HSTATE_ATTR(_name) \
2361a3437870SNishanth Aravamudan 	static struct kobj_attribute _name##_attr = \
2362a3437870SNishanth Aravamudan 		__ATTR(_name, 0644, _name##_show, _name##_store)
2363a3437870SNishanth Aravamudan 
2364a3437870SNishanth Aravamudan static struct kobject *hugepages_kobj;
2365a3437870SNishanth Aravamudan static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2366a3437870SNishanth Aravamudan 
23679a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
23689a305230SLee Schermerhorn 
23699a305230SLee Schermerhorn static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2370a3437870SNishanth Aravamudan {
2371a3437870SNishanth Aravamudan 	int i;
23729a305230SLee Schermerhorn 
2373a3437870SNishanth Aravamudan 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
23749a305230SLee Schermerhorn 		if (hstate_kobjs[i] == kobj) {
23759a305230SLee Schermerhorn 			if (nidp)
23769a305230SLee Schermerhorn 				*nidp = NUMA_NO_NODE;
2377a3437870SNishanth Aravamudan 			return &hstates[i];
23789a305230SLee Schermerhorn 		}
23799a305230SLee Schermerhorn 
23809a305230SLee Schermerhorn 	return kobj_to_node_hstate(kobj, nidp);
2381a3437870SNishanth Aravamudan }
2382a3437870SNishanth Aravamudan 
238306808b08SLee Schermerhorn static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2384a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
2385a3437870SNishanth Aravamudan {
23869a305230SLee Schermerhorn 	struct hstate *h;
23879a305230SLee Schermerhorn 	unsigned long nr_huge_pages;
23889a305230SLee Schermerhorn 	int nid;
23899a305230SLee Schermerhorn 
23909a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
23919a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
23929a305230SLee Schermerhorn 		nr_huge_pages = h->nr_huge_pages;
23939a305230SLee Schermerhorn 	else
23949a305230SLee Schermerhorn 		nr_huge_pages = h->nr_huge_pages_node[nid];
23959a305230SLee Schermerhorn 
23969a305230SLee Schermerhorn 	return sprintf(buf, "%lu\n", nr_huge_pages);
2397a3437870SNishanth Aravamudan }
2398adbe8726SEric B Munson 
2399238d3c13SDavid Rientjes static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2400238d3c13SDavid Rientjes 					   struct hstate *h, int nid,
2401238d3c13SDavid Rientjes 					   unsigned long count, size_t len)
2402a3437870SNishanth Aravamudan {
2403a3437870SNishanth Aravamudan 	int err;
2404bad44b5bSDavid Rientjes 	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2405a3437870SNishanth Aravamudan 
2406944d9fecSLuiz Capitulino 	if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2407adbe8726SEric B Munson 		err = -EINVAL;
2408adbe8726SEric B Munson 		goto out;
2409adbe8726SEric B Munson 	}
2410adbe8726SEric B Munson 
24119a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE) {
24129a305230SLee Schermerhorn 		/*
24139a305230SLee Schermerhorn 		 * global hstate attribute
24149a305230SLee Schermerhorn 		 */
24159a305230SLee Schermerhorn 		if (!(obey_mempolicy &&
24169a305230SLee Schermerhorn 				init_nodemask_of_mempolicy(nodes_allowed))) {
241706808b08SLee Schermerhorn 			NODEMASK_FREE(nodes_allowed);
24188cebfcd0SLai Jiangshan 			nodes_allowed = &node_states[N_MEMORY];
241906808b08SLee Schermerhorn 		}
24209a305230SLee Schermerhorn 	} else if (nodes_allowed) {
24219a305230SLee Schermerhorn 		/*
24229a305230SLee Schermerhorn 		 * per node hstate attribute: adjust count to global,
24239a305230SLee Schermerhorn 		 * but restrict alloc/free to the specified node.
24249a305230SLee Schermerhorn 		 */
24259a305230SLee Schermerhorn 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
24269a305230SLee Schermerhorn 		init_nodemask_of_node(nodes_allowed, nid);
24279a305230SLee Schermerhorn 	} else
24288cebfcd0SLai Jiangshan 		nodes_allowed = &node_states[N_MEMORY];
24299a305230SLee Schermerhorn 
243006808b08SLee Schermerhorn 	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2431a3437870SNishanth Aravamudan 
24328cebfcd0SLai Jiangshan 	if (nodes_allowed != &node_states[N_MEMORY])
243306808b08SLee Schermerhorn 		NODEMASK_FREE(nodes_allowed);
243406808b08SLee Schermerhorn 
243506808b08SLee Schermerhorn 	return len;
2436adbe8726SEric B Munson out:
2437adbe8726SEric B Munson 	NODEMASK_FREE(nodes_allowed);
2438adbe8726SEric B Munson 	return err;
243906808b08SLee Schermerhorn }
244006808b08SLee Schermerhorn 
2441238d3c13SDavid Rientjes static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2442238d3c13SDavid Rientjes 					 struct kobject *kobj, const char *buf,
2443238d3c13SDavid Rientjes 					 size_t len)
2444238d3c13SDavid Rientjes {
2445238d3c13SDavid Rientjes 	struct hstate *h;
2446238d3c13SDavid Rientjes 	unsigned long count;
2447238d3c13SDavid Rientjes 	int nid;
2448238d3c13SDavid Rientjes 	int err;
2449238d3c13SDavid Rientjes 
2450238d3c13SDavid Rientjes 	err = kstrtoul(buf, 10, &count);
2451238d3c13SDavid Rientjes 	if (err)
2452238d3c13SDavid Rientjes 		return err;
2453238d3c13SDavid Rientjes 
2454238d3c13SDavid Rientjes 	h = kobj_to_hstate(kobj, &nid);
2455238d3c13SDavid Rientjes 	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2456238d3c13SDavid Rientjes }
2457238d3c13SDavid Rientjes 
245806808b08SLee Schermerhorn static ssize_t nr_hugepages_show(struct kobject *kobj,
245906808b08SLee Schermerhorn 				       struct kobj_attribute *attr, char *buf)
246006808b08SLee Schermerhorn {
246106808b08SLee Schermerhorn 	return nr_hugepages_show_common(kobj, attr, buf);
246206808b08SLee Schermerhorn }
246306808b08SLee Schermerhorn 
246406808b08SLee Schermerhorn static ssize_t nr_hugepages_store(struct kobject *kobj,
246506808b08SLee Schermerhorn 	       struct kobj_attribute *attr, const char *buf, size_t len)
246606808b08SLee Schermerhorn {
2467238d3c13SDavid Rientjes 	return nr_hugepages_store_common(false, kobj, buf, len);
2468a3437870SNishanth Aravamudan }
2469a3437870SNishanth Aravamudan HSTATE_ATTR(nr_hugepages);
2470a3437870SNishanth Aravamudan 
247106808b08SLee Schermerhorn #ifdef CONFIG_NUMA
247206808b08SLee Schermerhorn 
247306808b08SLee Schermerhorn /*
247406808b08SLee Schermerhorn  * hstate attribute for an optional mempolicy-based constraint on persistent
247506808b08SLee Schermerhorn  * huge page alloc/free.
247606808b08SLee Schermerhorn  */
247706808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
247806808b08SLee Schermerhorn 				       struct kobj_attribute *attr, char *buf)
247906808b08SLee Schermerhorn {
248006808b08SLee Schermerhorn 	return nr_hugepages_show_common(kobj, attr, buf);
248106808b08SLee Schermerhorn }
248206808b08SLee Schermerhorn 
248306808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
248406808b08SLee Schermerhorn 	       struct kobj_attribute *attr, const char *buf, size_t len)
248506808b08SLee Schermerhorn {
2486238d3c13SDavid Rientjes 	return nr_hugepages_store_common(true, kobj, buf, len);
248706808b08SLee Schermerhorn }
248806808b08SLee Schermerhorn HSTATE_ATTR(nr_hugepages_mempolicy);
248906808b08SLee Schermerhorn #endif
249006808b08SLee Schermerhorn 
249106808b08SLee Schermerhorn 
2492a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2493a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
2494a3437870SNishanth Aravamudan {
24959a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2496a3437870SNishanth Aravamudan 	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2497a3437870SNishanth Aravamudan }
2498adbe8726SEric B Munson 
2499a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2500a3437870SNishanth Aravamudan 		struct kobj_attribute *attr, const char *buf, size_t count)
2501a3437870SNishanth Aravamudan {
2502a3437870SNishanth Aravamudan 	int err;
2503a3437870SNishanth Aravamudan 	unsigned long input;
25049a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2505a3437870SNishanth Aravamudan 
2506bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
2507adbe8726SEric B Munson 		return -EINVAL;
2508adbe8726SEric B Munson 
25093dbb95f7SJingoo Han 	err = kstrtoul(buf, 10, &input);
2510a3437870SNishanth Aravamudan 	if (err)
251173ae31e5SEric B Munson 		return err;
2512a3437870SNishanth Aravamudan 
2513a3437870SNishanth Aravamudan 	spin_lock(&hugetlb_lock);
2514a3437870SNishanth Aravamudan 	h->nr_overcommit_huge_pages = input;
2515a3437870SNishanth Aravamudan 	spin_unlock(&hugetlb_lock);
2516a3437870SNishanth Aravamudan 
2517a3437870SNishanth Aravamudan 	return count;
2518a3437870SNishanth Aravamudan }
2519a3437870SNishanth Aravamudan HSTATE_ATTR(nr_overcommit_hugepages);
2520a3437870SNishanth Aravamudan 
2521a3437870SNishanth Aravamudan static ssize_t free_hugepages_show(struct kobject *kobj,
2522a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
2523a3437870SNishanth Aravamudan {
25249a305230SLee Schermerhorn 	struct hstate *h;
25259a305230SLee Schermerhorn 	unsigned long free_huge_pages;
25269a305230SLee Schermerhorn 	int nid;
25279a305230SLee Schermerhorn 
25289a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
25299a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
25309a305230SLee Schermerhorn 		free_huge_pages = h->free_huge_pages;
25319a305230SLee Schermerhorn 	else
25329a305230SLee Schermerhorn 		free_huge_pages = h->free_huge_pages_node[nid];
25339a305230SLee Schermerhorn 
25349a305230SLee Schermerhorn 	return sprintf(buf, "%lu\n", free_huge_pages);
2535a3437870SNishanth Aravamudan }
2536a3437870SNishanth Aravamudan HSTATE_ATTR_RO(free_hugepages);
2537a3437870SNishanth Aravamudan 
2538a3437870SNishanth Aravamudan static ssize_t resv_hugepages_show(struct kobject *kobj,
2539a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
2540a3437870SNishanth Aravamudan {
25419a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2542a3437870SNishanth Aravamudan 	return sprintf(buf, "%lu\n", h->resv_huge_pages);
2543a3437870SNishanth Aravamudan }
2544a3437870SNishanth Aravamudan HSTATE_ATTR_RO(resv_hugepages);
2545a3437870SNishanth Aravamudan 
2546a3437870SNishanth Aravamudan static ssize_t surplus_hugepages_show(struct kobject *kobj,
2547a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
2548a3437870SNishanth Aravamudan {
25499a305230SLee Schermerhorn 	struct hstate *h;
25509a305230SLee Schermerhorn 	unsigned long surplus_huge_pages;
25519a305230SLee Schermerhorn 	int nid;
25529a305230SLee Schermerhorn 
25539a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
25549a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
25559a305230SLee Schermerhorn 		surplus_huge_pages = h->surplus_huge_pages;
25569a305230SLee Schermerhorn 	else
25579a305230SLee Schermerhorn 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
25589a305230SLee Schermerhorn 
25599a305230SLee Schermerhorn 	return sprintf(buf, "%lu\n", surplus_huge_pages);
2560a3437870SNishanth Aravamudan }
2561a3437870SNishanth Aravamudan HSTATE_ATTR_RO(surplus_hugepages);
2562a3437870SNishanth Aravamudan 
2563a3437870SNishanth Aravamudan static struct attribute *hstate_attrs[] = {
2564a3437870SNishanth Aravamudan 	&nr_hugepages_attr.attr,
2565a3437870SNishanth Aravamudan 	&nr_overcommit_hugepages_attr.attr,
2566a3437870SNishanth Aravamudan 	&free_hugepages_attr.attr,
2567a3437870SNishanth Aravamudan 	&resv_hugepages_attr.attr,
2568a3437870SNishanth Aravamudan 	&surplus_hugepages_attr.attr,
256906808b08SLee Schermerhorn #ifdef CONFIG_NUMA
257006808b08SLee Schermerhorn 	&nr_hugepages_mempolicy_attr.attr,
257106808b08SLee Schermerhorn #endif
2572a3437870SNishanth Aravamudan 	NULL,
2573a3437870SNishanth Aravamudan };
2574a3437870SNishanth Aravamudan 
257567e5ed96SArvind Yadav static const struct attribute_group hstate_attr_group = {
2576a3437870SNishanth Aravamudan 	.attrs = hstate_attrs,
2577a3437870SNishanth Aravamudan };
2578a3437870SNishanth Aravamudan 
2579094e9539SJeff Mahoney static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
25809a305230SLee Schermerhorn 				    struct kobject **hstate_kobjs,
258167e5ed96SArvind Yadav 				    const struct attribute_group *hstate_attr_group)
2582a3437870SNishanth Aravamudan {
2583a3437870SNishanth Aravamudan 	int retval;
2584972dc4deSAneesh Kumar K.V 	int hi = hstate_index(h);
2585a3437870SNishanth Aravamudan 
25869a305230SLee Schermerhorn 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
25879a305230SLee Schermerhorn 	if (!hstate_kobjs[hi])
2588a3437870SNishanth Aravamudan 		return -ENOMEM;
2589a3437870SNishanth Aravamudan 
25909a305230SLee Schermerhorn 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2591a3437870SNishanth Aravamudan 	if (retval)
25929a305230SLee Schermerhorn 		kobject_put(hstate_kobjs[hi]);
2593a3437870SNishanth Aravamudan 
2594a3437870SNishanth Aravamudan 	return retval;
2595a3437870SNishanth Aravamudan }
2596a3437870SNishanth Aravamudan 
2597a3437870SNishanth Aravamudan static void __init hugetlb_sysfs_init(void)
2598a3437870SNishanth Aravamudan {
2599a3437870SNishanth Aravamudan 	struct hstate *h;
2600a3437870SNishanth Aravamudan 	int err;
2601a3437870SNishanth Aravamudan 
2602a3437870SNishanth Aravamudan 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2603a3437870SNishanth Aravamudan 	if (!hugepages_kobj)
2604a3437870SNishanth Aravamudan 		return;
2605a3437870SNishanth Aravamudan 
2606a3437870SNishanth Aravamudan 	for_each_hstate(h) {
26079a305230SLee Schermerhorn 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
26089a305230SLee Schermerhorn 					 hstate_kobjs, &hstate_attr_group);
2609a3437870SNishanth Aravamudan 		if (err)
2610ffb22af5SAndrew Morton 			pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
2611a3437870SNishanth Aravamudan 	}
2612a3437870SNishanth Aravamudan }
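/*
 * The kobjects created above surface the hstate attributes under
 * /sys/kernel/mm/hugepages/<h->name>/, e.g. for 2 MB pages:
 *
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/resv_hugepages
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 */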
2613a3437870SNishanth Aravamudan 
26149a305230SLee Schermerhorn #ifdef CONFIG_NUMA
26159a305230SLee Schermerhorn 
26169a305230SLee Schermerhorn /*
26179a305230SLee Schermerhorn  * node_hstate/s - associate per node hstate attributes, via their kobjects,
261810fbcf4cSKay Sievers  * with node devices in node_devices[] using a parallel array.  The array
261910fbcf4cSKay Sievers  * index of a node device or node_hstate equals the node id.
262010fbcf4cSKay Sievers  * This is here to avoid any static dependency of the node device driver, in
26219a305230SLee Schermerhorn  * the base kernel, on the hugetlb module.
26229a305230SLee Schermerhorn  */
26239a305230SLee Schermerhorn struct node_hstate {
26249a305230SLee Schermerhorn 	struct kobject		*hugepages_kobj;
26259a305230SLee Schermerhorn 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
26269a305230SLee Schermerhorn };
2627b4e289a6SAlexander Kuleshov static struct node_hstate node_hstates[MAX_NUMNODES];
26289a305230SLee Schermerhorn 
26299a305230SLee Schermerhorn /*
263010fbcf4cSKay Sievers  * A subset of global hstate attributes for node devices
26319a305230SLee Schermerhorn  */
26329a305230SLee Schermerhorn static struct attribute *per_node_hstate_attrs[] = {
26339a305230SLee Schermerhorn 	&nr_hugepages_attr.attr,
26349a305230SLee Schermerhorn 	&free_hugepages_attr.attr,
26359a305230SLee Schermerhorn 	&surplus_hugepages_attr.attr,
26369a305230SLee Schermerhorn 	NULL,
26379a305230SLee Schermerhorn };
26389a305230SLee Schermerhorn 
263967e5ed96SArvind Yadav static const struct attribute_group per_node_hstate_attr_group = {
26409a305230SLee Schermerhorn 	.attrs = per_node_hstate_attrs,
26419a305230SLee Schermerhorn };
26429a305230SLee Schermerhorn 
26439a305230SLee Schermerhorn /*
264410fbcf4cSKay Sievers  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
26459a305230SLee Schermerhorn  * Returns node id via non-NULL nidp.
26469a305230SLee Schermerhorn  */
26479a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
26489a305230SLee Schermerhorn {
26499a305230SLee Schermerhorn 	int nid;
26509a305230SLee Schermerhorn 
26519a305230SLee Schermerhorn 	for (nid = 0; nid < nr_node_ids; nid++) {
26529a305230SLee Schermerhorn 		struct node_hstate *nhs = &node_hstates[nid];
26539a305230SLee Schermerhorn 		int i;
26549a305230SLee Schermerhorn 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
26559a305230SLee Schermerhorn 			if (nhs->hstate_kobjs[i] == kobj) {
26569a305230SLee Schermerhorn 				if (nidp)
26579a305230SLee Schermerhorn 					*nidp = nid;
26589a305230SLee Schermerhorn 				return &hstates[i];
26599a305230SLee Schermerhorn 			}
26609a305230SLee Schermerhorn 	}
26619a305230SLee Schermerhorn 
26629a305230SLee Schermerhorn 	BUG();
26639a305230SLee Schermerhorn 	return NULL;
26649a305230SLee Schermerhorn }
26659a305230SLee Schermerhorn 
26669a305230SLee Schermerhorn /*
266710fbcf4cSKay Sievers  * Unregister hstate attributes from a single node device.
26689a305230SLee Schermerhorn  * No-op if no hstate attributes attached.
26699a305230SLee Schermerhorn  */
26703cd8b44fSClaudiu Ghioc static void hugetlb_unregister_node(struct node *node)
26719a305230SLee Schermerhorn {
26729a305230SLee Schermerhorn 	struct hstate *h;
267310fbcf4cSKay Sievers 	struct node_hstate *nhs = &node_hstates[node->dev.id];
26749a305230SLee Schermerhorn 
26759a305230SLee Schermerhorn 	if (!nhs->hugepages_kobj)
26769b5e5d0fSLee Schermerhorn 		return;		/* no hstate attributes */
26779a305230SLee Schermerhorn 
2678972dc4deSAneesh Kumar K.V 	for_each_hstate(h) {
2679972dc4deSAneesh Kumar K.V 		int idx = hstate_index(h);
2680972dc4deSAneesh Kumar K.V 		if (nhs->hstate_kobjs[idx]) {
2681972dc4deSAneesh Kumar K.V 			kobject_put(nhs->hstate_kobjs[idx]);
2682972dc4deSAneesh Kumar K.V 			nhs->hstate_kobjs[idx] = NULL;
2683972dc4deSAneesh Kumar K.V 		}
26849a305230SLee Schermerhorn 	}
26859a305230SLee Schermerhorn 
26869a305230SLee Schermerhorn 	kobject_put(nhs->hugepages_kobj);
26879a305230SLee Schermerhorn 	nhs->hugepages_kobj = NULL;
26889a305230SLee Schermerhorn }
26899a305230SLee Schermerhorn 
26909a305230SLee Schermerhorn 
26919a305230SLee Schermerhorn /*
269210fbcf4cSKay Sievers  * Register hstate attributes for a single node device.
26939a305230SLee Schermerhorn  * No-op if attributes already registered.
26949a305230SLee Schermerhorn  */
26953cd8b44fSClaudiu Ghioc static void hugetlb_register_node(struct node *node)
26969a305230SLee Schermerhorn {
26979a305230SLee Schermerhorn 	struct hstate *h;
269810fbcf4cSKay Sievers 	struct node_hstate *nhs = &node_hstates[node->dev.id];
26999a305230SLee Schermerhorn 	int err;
27009a305230SLee Schermerhorn 
27019a305230SLee Schermerhorn 	if (nhs->hugepages_kobj)
27029a305230SLee Schermerhorn 		return;		/* already allocated */
27039a305230SLee Schermerhorn 
27049a305230SLee Schermerhorn 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
270510fbcf4cSKay Sievers 							&node->dev.kobj);
27069a305230SLee Schermerhorn 	if (!nhs->hugepages_kobj)
27079a305230SLee Schermerhorn 		return;
27089a305230SLee Schermerhorn 
27099a305230SLee Schermerhorn 	for_each_hstate(h) {
27109a305230SLee Schermerhorn 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
27119a305230SLee Schermerhorn 						nhs->hstate_kobjs,
27129a305230SLee Schermerhorn 						&per_node_hstate_attr_group);
27139a305230SLee Schermerhorn 		if (err) {
2714ffb22af5SAndrew Morton 			pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
271510fbcf4cSKay Sievers 				h->name, node->dev.id);
27169a305230SLee Schermerhorn 			hugetlb_unregister_node(node);
27179a305230SLee Schermerhorn 			break;
27189a305230SLee Schermerhorn 		}
27199a305230SLee Schermerhorn 	}
27209a305230SLee Schermerhorn }
27219a305230SLee Schermerhorn 
27229a305230SLee Schermerhorn /*
27239b5e5d0fSLee Schermerhorn  * hugetlb init time:  register hstate attributes for all registered node
272410fbcf4cSKay Sievers  * devices of nodes that have memory.  All on-line nodes should have
272510fbcf4cSKay Sievers  * registered their associated device by this time.
27269a305230SLee Schermerhorn  */
27277d9ca000SLuiz Capitulino static void __init hugetlb_register_all_nodes(void)
27289a305230SLee Schermerhorn {
27299a305230SLee Schermerhorn 	int nid;
27309a305230SLee Schermerhorn 
27318cebfcd0SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
27328732794bSWen Congyang 		struct node *node = node_devices[nid];
273310fbcf4cSKay Sievers 		if (node->dev.id == nid)
27349a305230SLee Schermerhorn 			hugetlb_register_node(node);
27359a305230SLee Schermerhorn 	}
27369a305230SLee Schermerhorn 
27379a305230SLee Schermerhorn 	/*
273810fbcf4cSKay Sievers 	 * Let the node device driver know we're here so it can
27399a305230SLee Schermerhorn 	 * [un]register hstate attributes on node hotplug.
27409a305230SLee Schermerhorn 	 */
27419a305230SLee Schermerhorn 	register_hugetlbfs_with_node(hugetlb_register_node,
27429a305230SLee Schermerhorn 				     hugetlb_unregister_node);
27439a305230SLee Schermerhorn }
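/*
 * The registration above mirrors the smaller per_node_hstate_attrs set
 * under each node device, e.g.:
 *
 *   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *
 * Writes there adjust the pool on that node only; see the nid handling
 * in __nr_hugepages_store_common() above.
 */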
27449a305230SLee Schermerhorn #else	/* !CONFIG_NUMA */
27459a305230SLee Schermerhorn 
27469a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
27479a305230SLee Schermerhorn {
27489a305230SLee Schermerhorn 	BUG();
27499a305230SLee Schermerhorn 	if (nidp)
27509a305230SLee Schermerhorn 		*nidp = -1;
27519a305230SLee Schermerhorn 	return NULL;
27529a305230SLee Schermerhorn }
27539a305230SLee Schermerhorn 
27549a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) { }
27559a305230SLee Schermerhorn 
27569a305230SLee Schermerhorn #endif
27579a305230SLee Schermerhorn 
2758a3437870SNishanth Aravamudan static int __init hugetlb_init(void)
2759a3437870SNishanth Aravamudan {
27608382d914SDavidlohr Bueso 	int i;
27618382d914SDavidlohr Bueso 
2762457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
27630ef89d25SBenjamin Herrenschmidt 		return 0;
2764a3437870SNishanth Aravamudan 
2765e11bfbfcSNick Piggin 	if (!size_to_hstate(default_hstate_size)) {
2766d715cf80SLiam R. Howlett 		if (default_hstate_size != 0) {
2767d715cf80SLiam R. Howlett 			pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
2768d715cf80SLiam R. Howlett 			       default_hstate_size, HPAGE_SIZE);
2769d715cf80SLiam R. Howlett 		}
2770d715cf80SLiam R. Howlett 
2771e11bfbfcSNick Piggin 		default_hstate_size = HPAGE_SIZE;
2772e11bfbfcSNick Piggin 		if (!size_to_hstate(default_hstate_size))
2773a3437870SNishanth Aravamudan 			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2774a3437870SNishanth Aravamudan 	}
2775972dc4deSAneesh Kumar K.V 	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2776f8b74815SVaishali Thakkar 	if (default_hstate_max_huge_pages) {
2777f8b74815SVaishali Thakkar 		if (!default_hstate.max_huge_pages)
2778e11bfbfcSNick Piggin 			default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2779f8b74815SVaishali Thakkar 	}
2780a3437870SNishanth Aravamudan 
2781a3437870SNishanth Aravamudan 	hugetlb_init_hstates();
2782aa888a74SAndi Kleen 	gather_bootmem_prealloc();
2783a3437870SNishanth Aravamudan 	report_hugepages();
2784a3437870SNishanth Aravamudan 
2785a3437870SNishanth Aravamudan 	hugetlb_sysfs_init();
27869a305230SLee Schermerhorn 	hugetlb_register_all_nodes();
27877179e7bfSJianguo Wu 	hugetlb_cgroup_file_init();
27889a305230SLee Schermerhorn 
27898382d914SDavidlohr Bueso #ifdef CONFIG_SMP
27908382d914SDavidlohr Bueso 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
27918382d914SDavidlohr Bueso #else
27928382d914SDavidlohr Bueso 	num_fault_mutexes = 1;
27938382d914SDavidlohr Bueso #endif
2794c672c7f2SMike Kravetz 	hugetlb_fault_mutex_table =
27956da2ec56SKees Cook 		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
27966da2ec56SKees Cook 			      GFP_KERNEL);
2797c672c7f2SMike Kravetz 	BUG_ON(!hugetlb_fault_mutex_table);
27988382d914SDavidlohr Bueso 
27998382d914SDavidlohr Bueso 	for (i = 0; i < num_fault_mutexes; i++)
2800c672c7f2SMike Kravetz 		mutex_init(&hugetlb_fault_mutex_table[i]);
2801a3437870SNishanth Aravamudan 	return 0;
2802a3437870SNishanth Aravamudan }
28033e89e1c5SPaul Gortmaker subsys_initcall(hugetlb_init);
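/*
 * Worked example of the fault-mutex sizing above: on an SMP kernel with
 * 6 possible CPUs, 8 * 6 = 48 rounds up to a 64-entry table; with 4
 * CPUs, 8 * 4 = 32 is already a power of two and is used as-is.
 */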
2804a3437870SNishanth Aravamudan 
2805a3437870SNishanth Aravamudan /* Should be called when processing an unsupported hugepagesz=... option */
28069fee021dSVaishali Thakkar void __init hugetlb_bad_size(void)
28079fee021dSVaishali Thakkar {
28089fee021dSVaishali Thakkar 	parsed_valid_hugepagesz = false;
28099fee021dSVaishali Thakkar }
28109fee021dSVaishali Thakkar 
2811d00181b9SKirill A. Shutemov void __init hugetlb_add_hstate(unsigned int order)
2812a3437870SNishanth Aravamudan {
2813a3437870SNishanth Aravamudan 	struct hstate *h;
28148faa8b07SAndi Kleen 	unsigned long i;
28158faa8b07SAndi Kleen 
2816a3437870SNishanth Aravamudan 	if (size_to_hstate(PAGE_SIZE << order)) {
2817598d8091SJoe Perches 		pr_warn("hugepagesz= specified twice, ignoring\n");
2818a3437870SNishanth Aravamudan 		return;
2819a3437870SNishanth Aravamudan 	}
282047d38344SAneesh Kumar K.V 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2821a3437870SNishanth Aravamudan 	BUG_ON(order == 0);
282247d38344SAneesh Kumar K.V 	h = &hstates[hugetlb_max_hstate++];
2823a3437870SNishanth Aravamudan 	h->order = order;
2824a3437870SNishanth Aravamudan 	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
28258faa8b07SAndi Kleen 	h->nr_huge_pages = 0;
28268faa8b07SAndi Kleen 	h->free_huge_pages = 0;
28278faa8b07SAndi Kleen 	for (i = 0; i < MAX_NUMNODES; ++i)
28288faa8b07SAndi Kleen 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
28290edaecfaSAneesh Kumar K.V 	INIT_LIST_HEAD(&h->hugepage_activelist);
283054f18d35SAndrew Morton 	h->next_nid_to_alloc = first_memory_node;
283154f18d35SAndrew Morton 	h->next_nid_to_free = first_memory_node;
2832a3437870SNishanth Aravamudan 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2833a3437870SNishanth Aravamudan 					huge_page_size(h)/1024);
28348faa8b07SAndi Kleen 
2835a3437870SNishanth Aravamudan 	parsed_hstate = h;
2836a3437870SNishanth Aravamudan }
2837a3437870SNishanth Aravamudan 
2838e11bfbfcSNick Piggin static int __init hugetlb_nrpages_setup(char *s)
2839a3437870SNishanth Aravamudan {
2840a3437870SNishanth Aravamudan 	unsigned long *mhp;
28418faa8b07SAndi Kleen 	static unsigned long *last_mhp;
2842a3437870SNishanth Aravamudan 
28439fee021dSVaishali Thakkar 	if (!parsed_valid_hugepagesz) {
28449fee021dSVaishali Thakkar 		pr_warn("hugepages = %s preceded by an unsupported hugepagesz, ignoring\n",
28459fee021dSVaishali Thakkar 			s);
28469fee021dSVaishali Thakkar 		parsed_valid_hugepagesz = true;
28479fee021dSVaishali Thakkar 		return 1;
28489fee021dSVaishali Thakkar 	}
2849a3437870SNishanth Aravamudan 	/*
285047d38344SAneesh Kumar K.V 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2851a3437870SNishanth Aravamudan 	 * so this hugepages= parameter goes to the "default hstate".
2852a3437870SNishanth Aravamudan 	 */
28539fee021dSVaishali Thakkar 	else if (!hugetlb_max_hstate)
2854a3437870SNishanth Aravamudan 		mhp = &default_hstate_max_huge_pages;
2855a3437870SNishanth Aravamudan 	else
2856a3437870SNishanth Aravamudan 		mhp = &parsed_hstate->max_huge_pages;
2857a3437870SNishanth Aravamudan 
28588faa8b07SAndi Kleen 	if (mhp == last_mhp) {
2859598d8091SJoe Perches 		pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
28608faa8b07SAndi Kleen 		return 1;
28618faa8b07SAndi Kleen 	}
28628faa8b07SAndi Kleen 
2863a3437870SNishanth Aravamudan 	if (sscanf(s, "%lu", mhp) <= 0)
2864a3437870SNishanth Aravamudan 		*mhp = 0;
2865a3437870SNishanth Aravamudan 
28668faa8b07SAndi Kleen 	/*
28678faa8b07SAndi Kleen 	 * Global state is always initialized later in hugetlb_init.
28688faa8b07SAndi Kleen 	 * But we need to allocate the pages of gigantic hstates (order >=
28698faa8b07SAndi Kleen 	 * MAX_ORDER) here early so they can still use the bootmem allocator.
28708faa8b07SAndi Kleen 	 */
287147d38344SAneesh Kumar K.V 	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
28728faa8b07SAndi Kleen 		hugetlb_hstate_alloc_pages(parsed_hstate);
28738faa8b07SAndi Kleen 
28748faa8b07SAndi Kleen 	last_mhp = mhp;
28758faa8b07SAndi Kleen 
2876a3437870SNishanth Aravamudan 	return 1;
2877a3437870SNishanth Aravamudan }
2878e11bfbfcSNick Piggin __setup("hugepages=", hugetlb_nrpages_setup);
2879e11bfbfcSNick Piggin 
2880e11bfbfcSNick Piggin static int __init hugetlb_default_setup(char *s)
2881e11bfbfcSNick Piggin {
2882e11bfbfcSNick Piggin 	default_hstate_size = memparse(s, &s);
2883e11bfbfcSNick Piggin 	return 1;
2884e11bfbfcSNick Piggin }
2885e11bfbfcSNick Piggin __setup("default_hugepagesz=", hugetlb_default_setup);
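/*
 * The three early parameters above cooperate in command-line order:
 * hugepagesz= selects parsed_hstate, a following hugepages= sets its
 * max_huge_pages, and default_hugepagesz= chooses the default hstate.
 * Example boot line (values illustrative):
 *
 *   default_hugepagesz=1G hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512
 */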
2886a3437870SNishanth Aravamudan 
28878a213460SNishanth Aravamudan static unsigned int cpuset_mems_nr(unsigned int *array)
28888a213460SNishanth Aravamudan {
28898a213460SNishanth Aravamudan 	int node;
28908a213460SNishanth Aravamudan 	unsigned int nr = 0;
28918a213460SNishanth Aravamudan 
28928a213460SNishanth Aravamudan 	for_each_node_mask(node, cpuset_current_mems_allowed)
28938a213460SNishanth Aravamudan 		nr += array[node];
28948a213460SNishanth Aravamudan 
28958a213460SNishanth Aravamudan 	return nr;
28968a213460SNishanth Aravamudan }
28978a213460SNishanth Aravamudan 
28988a213460SNishanth Aravamudan #ifdef CONFIG_SYSCTL
289906808b08SLee Schermerhorn static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
290006808b08SLee Schermerhorn 			 struct ctl_table *table, int write,
290106808b08SLee Schermerhorn 			 void __user *buffer, size_t *length, loff_t *ppos)
29021da177e4SLinus Torvalds {
2903e5ff2159SAndi Kleen 	struct hstate *h = &default_hstate;
2904238d3c13SDavid Rientjes 	unsigned long tmp = h->max_huge_pages;
290508d4a246SMichal Hocko 	int ret;
2906e5ff2159SAndi Kleen 
2907457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
290886613628SJan Stancek 		return -EOPNOTSUPP;
2909457c1b27SNishanth Aravamudan 
2910e5ff2159SAndi Kleen 	table->data = &tmp;
2911e5ff2159SAndi Kleen 	table->maxlen = sizeof(unsigned long);
291208d4a246SMichal Hocko 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
291308d4a246SMichal Hocko 	if (ret)
291408d4a246SMichal Hocko 		goto out;
2915e5ff2159SAndi Kleen 
2916238d3c13SDavid Rientjes 	if (write)
2917238d3c13SDavid Rientjes 		ret = __nr_hugepages_store_common(obey_mempolicy, h,
2918238d3c13SDavid Rientjes 						  NUMA_NO_NODE, tmp, *length);
291908d4a246SMichal Hocko out:
292008d4a246SMichal Hocko 	return ret;
29211da177e4SLinus Torvalds }
2922396faf03SMel Gorman 
292306808b08SLee Schermerhorn int hugetlb_sysctl_handler(struct ctl_table *table, int write,
292406808b08SLee Schermerhorn 			  void __user *buffer, size_t *length, loff_t *ppos)
292506808b08SLee Schermerhorn {
292606808b08SLee Schermerhorn 
292706808b08SLee Schermerhorn 	return hugetlb_sysctl_handler_common(false, table, write,
292806808b08SLee Schermerhorn 							buffer, length, ppos);
292906808b08SLee Schermerhorn }
293006808b08SLee Schermerhorn 
293106808b08SLee Schermerhorn #ifdef CONFIG_NUMA
293206808b08SLee Schermerhorn int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
293306808b08SLee Schermerhorn 			  void __user *buffer, size_t *length, loff_t *ppos)
293406808b08SLee Schermerhorn {
293506808b08SLee Schermerhorn 	return hugetlb_sysctl_handler_common(true, table, write,
293606808b08SLee Schermerhorn 							buffer, length, ppos);
293706808b08SLee Schermerhorn }
293806808b08SLee Schermerhorn #endif /* CONFIG_NUMA */
293906808b08SLee Schermerhorn 
2940a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write,
29418d65af78SAlexey Dobriyan 			void __user *buffer,
2942a3d0c6aaSNishanth Aravamudan 			size_t *length, loff_t *ppos)
2943a3d0c6aaSNishanth Aravamudan {
2944a5516438SAndi Kleen 	struct hstate *h = &default_hstate;
2945e5ff2159SAndi Kleen 	unsigned long tmp;
294608d4a246SMichal Hocko 	int ret;
2947e5ff2159SAndi Kleen 
2948457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
294986613628SJan Stancek 		return -EOPNOTSUPP;
2950457c1b27SNishanth Aravamudan 
2951e5ff2159SAndi Kleen 	tmp = h->nr_overcommit_huge_pages;
2952e5ff2159SAndi Kleen 
2953bae7f4aeSLuiz Capitulino 	if (write && hstate_is_gigantic(h))
2954adbe8726SEric B Munson 		return -EINVAL;
2955adbe8726SEric B Munson 
2956e5ff2159SAndi Kleen 	table->data = &tmp;
2957e5ff2159SAndi Kleen 	table->maxlen = sizeof(unsigned long);
295808d4a246SMichal Hocko 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
295908d4a246SMichal Hocko 	if (ret)
296008d4a246SMichal Hocko 		goto out;
2961e5ff2159SAndi Kleen 
2962e5ff2159SAndi Kleen 	if (write) {
2963064d9efeSNishanth Aravamudan 		spin_lock(&hugetlb_lock);
2964e5ff2159SAndi Kleen 		h->nr_overcommit_huge_pages = tmp;
2965a3d0c6aaSNishanth Aravamudan 		spin_unlock(&hugetlb_lock);
2966e5ff2159SAndi Kleen 	}
296708d4a246SMichal Hocko out:
296808d4a246SMichal Hocko 	return ret;
2969a3d0c6aaSNishanth Aravamudan }
2970a3d0c6aaSNishanth Aravamudan 
29711da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */
29721da177e4SLinus Torvalds 
2973e1759c21SAlexey Dobriyan void hugetlb_report_meminfo(struct seq_file *m)
29741da177e4SLinus Torvalds {
2975fcb2b0c5SRoman Gushchin 	struct hstate *h;
2976fcb2b0c5SRoman Gushchin 	unsigned long total = 0;
2977fcb2b0c5SRoman Gushchin 
2978457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
2979457c1b27SNishanth Aravamudan 		return;
2980fcb2b0c5SRoman Gushchin 
2981fcb2b0c5SRoman Gushchin 	for_each_hstate(h) {
2982fcb2b0c5SRoman Gushchin 		unsigned long count = h->nr_huge_pages;
2983fcb2b0c5SRoman Gushchin 
2984fcb2b0c5SRoman Gushchin 		total += (PAGE_SIZE << huge_page_order(h)) * count;
2985fcb2b0c5SRoman Gushchin 
2986fcb2b0c5SRoman Gushchin 		if (h == &default_hstate)
2987e1759c21SAlexey Dobriyan 			seq_printf(m,
29881da177e4SLinus Torvalds 				   "HugePages_Total:   %5lu\n"
29891da177e4SLinus Torvalds 				   "HugePages_Free:    %5lu\n"
2990b45b5bd6SDavid Gibson 				   "HugePages_Rsvd:    %5lu\n"
29917893d1d5SAdam Litke 				   "HugePages_Surp:    %5lu\n"
29924f98a2feSRik van Riel 				   "Hugepagesize:   %8lu kB\n",
2993fcb2b0c5SRoman Gushchin 				   count,
2994a5516438SAndi Kleen 				   h->free_huge_pages,
2995a5516438SAndi Kleen 				   h->resv_huge_pages,
2996a5516438SAndi Kleen 				   h->surplus_huge_pages,
2997fcb2b0c5SRoman Gushchin 				   (PAGE_SIZE << huge_page_order(h)) / 1024);
2998fcb2b0c5SRoman Gushchin 	}
2999fcb2b0c5SRoman Gushchin 
3000fcb2b0c5SRoman Gushchin 	seq_printf(m, "Hugetlb:        %8lu kB\n", total / 1024);
30011da177e4SLinus Torvalds }
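/*
 * With the format strings above, /proc/meminfo reports the default
 * hstate in detail plus a combined total, e.g. (values illustrative):
 *
 *   HugePages_Total:      64
 *   HugePages_Free:       64
 *   HugePages_Rsvd:        0
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 *   Hugetlb:          131072 kB
 */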
30021da177e4SLinus Torvalds 
30031da177e4SLinus Torvalds int hugetlb_report_node_meminfo(int nid, char *buf)
30041da177e4SLinus Torvalds {
3005a5516438SAndi Kleen 	struct hstate *h = &default_hstate;
3006457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
3007457c1b27SNishanth Aravamudan 		return 0;
30081da177e4SLinus Torvalds 	return sprintf(buf,
30091da177e4SLinus Torvalds 		"Node %d HugePages_Total: %5u\n"
3010a1de0919SNishanth Aravamudan 		"Node %d HugePages_Free:  %5u\n"
3011a1de0919SNishanth Aravamudan 		"Node %d HugePages_Surp:  %5u\n",
3012a5516438SAndi Kleen 		nid, h->nr_huge_pages_node[nid],
3013a5516438SAndi Kleen 		nid, h->free_huge_pages_node[nid],
3014a5516438SAndi Kleen 		nid, h->surplus_huge_pages_node[nid]);
30151da177e4SLinus Torvalds }
30161da177e4SLinus Torvalds 
3017949f7ec5SDavid Rientjes void hugetlb_show_meminfo(void)
3018949f7ec5SDavid Rientjes {
3019949f7ec5SDavid Rientjes 	struct hstate *h;
3020949f7ec5SDavid Rientjes 	int nid;
3021949f7ec5SDavid Rientjes 
3022457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
3023457c1b27SNishanth Aravamudan 		return;
3024457c1b27SNishanth Aravamudan 
3025949f7ec5SDavid Rientjes 	for_each_node_state(nid, N_MEMORY)
3026949f7ec5SDavid Rientjes 		for_each_hstate(h)
3027949f7ec5SDavid Rientjes 			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3028949f7ec5SDavid Rientjes 				nid,
3029949f7ec5SDavid Rientjes 				h->nr_huge_pages_node[nid],
3030949f7ec5SDavid Rientjes 				h->free_huge_pages_node[nid],
3031949f7ec5SDavid Rientjes 				h->surplus_huge_pages_node[nid],
3032949f7ec5SDavid Rientjes 				1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3033949f7ec5SDavid Rientjes }
3034949f7ec5SDavid Rientjes 
30355d317b2bSNaoya Horiguchi void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
30365d317b2bSNaoya Horiguchi {
30375d317b2bSNaoya Horiguchi 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
30385d317b2bSNaoya Horiguchi 		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
30395d317b2bSNaoya Horiguchi }
30405d317b2bSNaoya Horiguchi 
30411da177e4SLinus Torvalds /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
30421da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void)
30431da177e4SLinus Torvalds {
3044d0028588SWanpeng Li 	struct hstate *h;
3045d0028588SWanpeng Li 	unsigned long nr_total_pages = 0;
3046d0028588SWanpeng Li 
3047d0028588SWanpeng Li 	for_each_hstate(h)
3048d0028588SWanpeng Li 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3049d0028588SWanpeng Li 	return nr_total_pages;
30501da177e4SLinus Torvalds }
30511da177e4SLinus Torvalds 
3052a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta)
3053fc1b8a73SMel Gorman {
3054fc1b8a73SMel Gorman 	int ret = -ENOMEM;
3055fc1b8a73SMel Gorman 
3056fc1b8a73SMel Gorman 	spin_lock(&hugetlb_lock);
3057fc1b8a73SMel Gorman 	/*
3058fc1b8a73SMel Gorman 	 * When cpuset is configured, it breaks the strict hugetlb page
3059fc1b8a73SMel Gorman 	 * reservation as the accounting is done on a global variable. Such
3060fc1b8a73SMel Gorman 	 * reservation is completely rubbish in the presence of cpuset because
3061fc1b8a73SMel Gorman 	 * the reservation is not checked against page availability for the
3062fc1b8a73SMel Gorman 	 * current cpuset. Applications can still potentially be OOM'ed by the
3063fc1b8a73SMel Gorman 	 * kernel for lack of free htlb pages in the cpuset that the task is in.
3064fc1b8a73SMel Gorman 	 * Attempting to enforce strict accounting with cpuset is almost
3065fc1b8a73SMel Gorman 	 * impossible (or too ugly) because cpuset is so fluid that
3066fc1b8a73SMel Gorman 	 * tasks or memory nodes can be dynamically moved between cpusets.
3067fc1b8a73SMel Gorman 	 *
3068fc1b8a73SMel Gorman 	 * The change of semantics for shared hugetlb mapping with cpuset is
3069fc1b8a73SMel Gorman 	 * undesirable. However, in order to preserve some of the semantics,
3070fc1b8a73SMel Gorman 	 * we fall back to check against current free page availability as
3071fc1b8a73SMel Gorman 	 * a best attempt and hopefully to minimize the impact of changing
3072fc1b8a73SMel Gorman 	 * semantics that cpuset has.
3073fc1b8a73SMel Gorman 	 */
3074fc1b8a73SMel Gorman 	if (delta > 0) {
3075a5516438SAndi Kleen 		if (gather_surplus_pages(h, delta) < 0)
3076fc1b8a73SMel Gorman 			goto out;
3077fc1b8a73SMel Gorman 
3078a5516438SAndi Kleen 		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3079a5516438SAndi Kleen 			return_unused_surplus_pages(h, delta);
3080fc1b8a73SMel Gorman 			goto out;
3081fc1b8a73SMel Gorman 		}
3082fc1b8a73SMel Gorman 	}
3083fc1b8a73SMel Gorman 
3084fc1b8a73SMel Gorman 	ret = 0;
3085fc1b8a73SMel Gorman 	if (delta < 0)
3086a5516438SAndi Kleen 		return_unused_surplus_pages(h, (unsigned long) -delta);
3087fc1b8a73SMel Gorman 
3088fc1b8a73SMel Gorman out:
3089fc1b8a73SMel Gorman 	spin_unlock(&hugetlb_lock);
3090fc1b8a73SMel Gorman 	return ret;
3091fc1b8a73SMel Gorman }
3092fc1b8a73SMel Gorman 
309384afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma)
309484afd99bSAndy Whitcroft {
3095f522c3acSJoonsoo Kim 	struct resv_map *resv = vma_resv_map(vma);
309684afd99bSAndy Whitcroft 
309784afd99bSAndy Whitcroft 	/*
309884afd99bSAndy Whitcroft 	 * This new VMA should share its siblings reservation map if present.
309984afd99bSAndy Whitcroft 	 * The VMA will only ever have a valid reservation map pointer where
310084afd99bSAndy Whitcroft 	 * it is being copied for another still existing VMA.  As that VMA
310125985edcSLucas De Marchi 	 * has a reference to the reservation map it cannot disappear until
310284afd99bSAndy Whitcroft 	 * after this open call completes.  It is therefore safe to take a
310384afd99bSAndy Whitcroft 	 * new reference here without additional locking.
310484afd99bSAndy Whitcroft 	 */
31054e35f483SJoonsoo Kim 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3106f522c3acSJoonsoo Kim 		kref_get(&resv->refs);
310784afd99bSAndy Whitcroft }
310884afd99bSAndy Whitcroft 
3109a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3110a1e78772SMel Gorman {
3111a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
3112f522c3acSJoonsoo Kim 	struct resv_map *resv = vma_resv_map(vma);
311390481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_vma(vma);
31144e35f483SJoonsoo Kim 	unsigned long reserve, start, end;
31151c5ecae3SMike Kravetz 	long gbl_reserve;
311684afd99bSAndy Whitcroft 
31174e35f483SJoonsoo Kim 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
31184e35f483SJoonsoo Kim 		return;
31194e35f483SJoonsoo Kim 
3120a5516438SAndi Kleen 	start = vma_hugecache_offset(h, vma, vma->vm_start);
3121a5516438SAndi Kleen 	end = vma_hugecache_offset(h, vma, vma->vm_end);
312284afd99bSAndy Whitcroft 
31234e35f483SJoonsoo Kim 	reserve = (end - start) - region_count(resv, start, end);
312484afd99bSAndy Whitcroft 
3125f031dd27SJoonsoo Kim 	kref_put(&resv->refs, resv_map_release);
312684afd99bSAndy Whitcroft 
31277251ff78SAdam Litke 	if (reserve) {
31281c5ecae3SMike Kravetz 		/*
31291c5ecae3SMike Kravetz 		 * Decrement reserve counts.  The global reserve count may be
31301c5ecae3SMike Kravetz 		 * adjusted if the subpool has a minimum size.
31311c5ecae3SMike Kravetz 		 */
31321c5ecae3SMike Kravetz 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
31331c5ecae3SMike Kravetz 		hugetlb_acct_memory(h, -gbl_reserve);
31347251ff78SAdam Litke 	}
3135a1e78772SMel Gorman }
3136a1e78772SMel Gorman 
313731383c68SDan Williams static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
313831383c68SDan Williams {
313931383c68SDan Williams 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
314031383c68SDan Williams 		return -EINVAL;
314131383c68SDan Williams 	return 0;
314231383c68SDan Williams }
314331383c68SDan Williams 
314405ea8860SDan Williams static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
314505ea8860SDan Williams {
314605ea8860SDan Williams 	struct hstate *hstate = hstate_vma(vma);
314705ea8860SDan Williams 
314805ea8860SDan Williams 	return 1UL << huge_page_shift(hstate);
314905ea8860SDan Williams }
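
/*
 * Example (illustrative, not part of the original file): for the common
 * x86-64 2MB hstate, huge_page_shift() is 21 and this returns
 * 1UL << 21 == 0x200000; a 1GB hstate would return 1UL << 30.
 */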
315005ea8860SDan Williams 
31511da177e4SLinus Torvalds /*
31521da177e4SLinus Torvalds  * We cannot handle pagefaults against hugetlb pages at all.  They cause
31531da177e4SLinus Torvalds  * handle_mm_fault() to try to instantiate regular-sized pages in the
31541da177e4SLinus Torvalds  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
31551da177e4SLinus Torvalds  * this far.
31561da177e4SLinus Torvalds  */
3157b3ec9f33SSouptick Joarder static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
31581da177e4SLinus Torvalds {
31591da177e4SLinus Torvalds 	BUG();
3160d0217ac0SNick Piggin 	return 0;
31611da177e4SLinus Torvalds }
31621da177e4SLinus Torvalds 
3163eec3636aSJane Chu /*
3164eec3636aSJane Chu  * When a new function is introduced to vm_operations_struct and added
3165eec3636aSJane Chu  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3166eec3636aSJane Chu  * This is because under the System V memory model, mappings created via
3167eec3636aSJane Chu  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3168eec3636aSJane Chu  * and their original vm_ops are overwritten with shm_vm_ops.
3169eec3636aSJane Chu  */
3170f0f37e2fSAlexey Dobriyan const struct vm_operations_struct hugetlb_vm_ops = {
3171d0217ac0SNick Piggin 	.fault = hugetlb_vm_op_fault,
317284afd99bSAndy Whitcroft 	.open = hugetlb_vm_op_open,
3173a1e78772SMel Gorman 	.close = hugetlb_vm_op_close,
317431383c68SDan Williams 	.split = hugetlb_vm_op_split,
317505ea8860SDan Williams 	.pagesize = hugetlb_vm_op_pagesize,
31761da177e4SLinus Torvalds };
31771da177e4SLinus Torvalds 
31781e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
31791e8f889bSDavid Gibson 				int writable)
318063551ae0SDavid Gibson {
318163551ae0SDavid Gibson 	pte_t entry;
318263551ae0SDavid Gibson 
31831e8f889bSDavid Gibson 	if (writable) {
3184106c992aSGerald Schaefer 		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3185106c992aSGerald Schaefer 					 vma->vm_page_prot)));
318663551ae0SDavid Gibson 	} else {
3187106c992aSGerald Schaefer 		entry = huge_pte_wrprotect(mk_huge_pte(page,
3188106c992aSGerald Schaefer 					   vma->vm_page_prot));
318963551ae0SDavid Gibson 	}
319063551ae0SDavid Gibson 	entry = pte_mkyoung(entry);
319163551ae0SDavid Gibson 	entry = pte_mkhuge(entry);
3192d9ed9faaSChris Metcalf 	entry = arch_make_huge_pte(entry, vma, page, writable);
319363551ae0SDavid Gibson 
319463551ae0SDavid Gibson 	return entry;
319563551ae0SDavid Gibson }
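
/*
 * Note (added): writable entries are created dirty up front.  hugetlbfs
 * pages are never written back, so there is nothing to gain from taking
 * a later fault just to set the dirty bit.
 */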
319663551ae0SDavid Gibson 
31971e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma,
31981e8f889bSDavid Gibson 				   unsigned long address, pte_t *ptep)
31991e8f889bSDavid Gibson {
32001e8f889bSDavid Gibson 	pte_t entry;
32011e8f889bSDavid Gibson 
3202106c992aSGerald Schaefer 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
320332f84528SChris Forbes 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
32044b3073e1SRussell King 		update_mmu_cache(vma, address, ptep);
32051e8f889bSDavid Gibson }
32061e8f889bSDavid Gibson 
3207d5ed7444SAneesh Kumar K.V bool is_hugetlb_entry_migration(pte_t pte)
32084a705fefSNaoya Horiguchi {
32094a705fefSNaoya Horiguchi 	swp_entry_t swp;
32104a705fefSNaoya Horiguchi 
32114a705fefSNaoya Horiguchi 	if (huge_pte_none(pte) || pte_present(pte))
3212d5ed7444SAneesh Kumar K.V 		return false;
32134a705fefSNaoya Horiguchi 	swp = pte_to_swp_entry(pte);
32144a705fefSNaoya Horiguchi 	if (non_swap_entry(swp) && is_migration_entry(swp))
3215d5ed7444SAneesh Kumar K.V 		return true;
32164a705fefSNaoya Horiguchi 	else
3217d5ed7444SAneesh Kumar K.V 		return false;
32184a705fefSNaoya Horiguchi }
32194a705fefSNaoya Horiguchi 
32204a705fefSNaoya Horiguchi static int is_hugetlb_entry_hwpoisoned(pte_t pte)
32214a705fefSNaoya Horiguchi {
32224a705fefSNaoya Horiguchi 	swp_entry_t swp;
32234a705fefSNaoya Horiguchi 
32244a705fefSNaoya Horiguchi 	if (huge_pte_none(pte) || pte_present(pte))
32254a705fefSNaoya Horiguchi 		return 0;
32264a705fefSNaoya Horiguchi 	swp = pte_to_swp_entry(pte);
32274a705fefSNaoya Horiguchi 	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
32284a705fefSNaoya Horiguchi 		return 1;
32294a705fefSNaoya Horiguchi 	else
32304a705fefSNaoya Horiguchi 		return 0;
32314a705fefSNaoya Horiguchi }
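
/*
 * Illustrative usage (added; mirrors hugetlb_fault() below): both
 * helpers above classify a non-present hugetlb PTE by decoding it as a
 * swap entry, e.g.:
 *
 *	pte_t pte = huge_ptep_get(ptep);
 *
 *	if (is_hugetlb_entry_migration(pte))
 *		migration_entry_wait_huge(vma, mm, ptep);
 *	else if (is_hugetlb_entry_hwpoisoned(pte))
 *		return VM_FAULT_HWPOISON_LARGE;
 */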
32321e8f889bSDavid Gibson 
323363551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
323463551ae0SDavid Gibson 			    struct vm_area_struct *vma)
323563551ae0SDavid Gibson {
32365e41540cSMike Kravetz 	pte_t *src_pte, *dst_pte, entry, dst_entry;
323763551ae0SDavid Gibson 	struct page *ptepage;
32381c59827dSHugh Dickins 	unsigned long addr;
32391e8f889bSDavid Gibson 	int cow;
3240a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
3241a5516438SAndi Kleen 	unsigned long sz = huge_page_size(h);
3242e8569dd2SAndreas Sandberg 	unsigned long mmun_start;	/* For mmu_notifiers */
3243e8569dd2SAndreas Sandberg 	unsigned long mmun_end;		/* For mmu_notifiers */
3244e8569dd2SAndreas Sandberg 	int ret = 0;
32451e8f889bSDavid Gibson 
32461e8f889bSDavid Gibson 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
324763551ae0SDavid Gibson 
3248e8569dd2SAndreas Sandberg 	mmun_start = vma->vm_start;
3249e8569dd2SAndreas Sandberg 	mmun_end = vma->vm_end;
3250e8569dd2SAndreas Sandberg 	if (cow)
3251e8569dd2SAndreas Sandberg 		mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3252e8569dd2SAndreas Sandberg 
3253a5516438SAndi Kleen 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3254cb900f41SKirill A. Shutemov 		spinlock_t *src_ptl, *dst_ptl;
32557868a208SPunit Agrawal 		src_pte = huge_pte_offset(src, addr, sz);
3256c74df32cSHugh Dickins 		if (!src_pte)
3257c74df32cSHugh Dickins 			continue;
3258a5516438SAndi Kleen 		dst_pte = huge_pte_alloc(dst, addr, sz);
3259e8569dd2SAndreas Sandberg 		if (!dst_pte) {
3260e8569dd2SAndreas Sandberg 			ret = -ENOMEM;
3261e8569dd2SAndreas Sandberg 			break;
3262e8569dd2SAndreas Sandberg 		}
3263c5c99429SLarry Woodman 
32645e41540cSMike Kravetz 		/*
32655e41540cSMike Kravetz 		 * If the pagetables are shared don't copy or take references.
32665e41540cSMike Kravetz 		 * dst_pte == src_pte is the common case of src/dest sharing.
32675e41540cSMike Kravetz 		 *
32685e41540cSMike Kravetz 		 * However, src could have 'unshared' and dst shares with
32695e41540cSMike Kravetz 		 * another vma.  If dst_pte !none, this implies sharing.
32705e41540cSMike Kravetz 		 * Check here before taking page table lock, and once again
32715e41540cSMike Kravetz 		 * after taking the lock below.
32725e41540cSMike Kravetz 		 */
32735e41540cSMike Kravetz 		dst_entry = huge_ptep_get(dst_pte);
32745e41540cSMike Kravetz 		if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3275c5c99429SLarry Woodman 			continue;
3276c5c99429SLarry Woodman 
3277cb900f41SKirill A. Shutemov 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
3278cb900f41SKirill A. Shutemov 		src_ptl = huge_pte_lockptr(h, src, src_pte);
3279cb900f41SKirill A. Shutemov 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
32804a705fefSNaoya Horiguchi 		entry = huge_ptep_get(src_pte);
32815e41540cSMike Kravetz 		dst_entry = huge_ptep_get(dst_pte);
32825e41540cSMike Kravetz 		if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
32835e41540cSMike Kravetz 			/*
32845e41540cSMike Kravetz 			 * Skip if src entry none.  Also, skip in the
32855e41540cSMike Kravetz 			 * unlikely case dst entry !none as this implies
32865e41540cSMike Kravetz 			 * sharing with another vma.
32875e41540cSMike Kravetz 			 */
32884a705fefSNaoya Horiguchi 			;
32894a705fefSNaoya Horiguchi 		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
32904a705fefSNaoya Horiguchi 				    is_hugetlb_entry_hwpoisoned(entry))) {
32914a705fefSNaoya Horiguchi 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
32924a705fefSNaoya Horiguchi 
32934a705fefSNaoya Horiguchi 			if (is_write_migration_entry(swp_entry) && cow) {
32944a705fefSNaoya Horiguchi 				/*
32954a705fefSNaoya Horiguchi 				 * COW mappings require pages in both
32964a705fefSNaoya Horiguchi 				 * parent and child to be set to read.
32974a705fefSNaoya Horiguchi 				 */
32984a705fefSNaoya Horiguchi 				make_migration_entry_read(&swp_entry);
32994a705fefSNaoya Horiguchi 				entry = swp_entry_to_pte(swp_entry);
3300e5251fd4SPunit Agrawal 				set_huge_swap_pte_at(src, addr, src_pte,
3301e5251fd4SPunit Agrawal 						     entry, sz);
33024a705fefSNaoya Horiguchi 			}
3303e5251fd4SPunit Agrawal 			set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
33044a705fefSNaoya Horiguchi 		} else {
330534ee645eSJoerg Roedel 			if (cow) {
33060f10851eSJérôme Glisse 				/*
33070f10851eSJérôme Glisse 				 * No need to notify as we are downgrading page
33080f10851eSJérôme Glisse 				 * table protection not changing it to point
33090f10851eSJérôme Glisse 				 * to a new page.
33100f10851eSJérôme Glisse 				 *
3311ad56b738SMike Rapoport 				 * See Documentation/vm/mmu_notifier.rst
33120f10851eSJérôme Glisse 				 */
33137f2e9525SGerald Schaefer 				huge_ptep_set_wrprotect(src, addr, src_pte);
331434ee645eSJoerg Roedel 			}
33150253d634SNaoya Horiguchi 			entry = huge_ptep_get(src_pte);
331663551ae0SDavid Gibson 			ptepage = pte_page(entry);
331763551ae0SDavid Gibson 			get_page(ptepage);
331853f9263bSKirill A. Shutemov 			page_dup_rmap(ptepage, true);
331963551ae0SDavid Gibson 			set_huge_pte_at(dst, addr, dst_pte, entry);
33205d317b2bSNaoya Horiguchi 			hugetlb_count_add(pages_per_huge_page(h), dst);
33211c59827dSHugh Dickins 		}
3322cb900f41SKirill A. Shutemov 		spin_unlock(src_ptl);
3323cb900f41SKirill A. Shutemov 		spin_unlock(dst_ptl);
332463551ae0SDavid Gibson 	}
332563551ae0SDavid Gibson 
3326e8569dd2SAndreas Sandberg 	if (cow)
3327e8569dd2SAndreas Sandberg 		mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3328e8569dd2SAndreas Sandberg 
3329e8569dd2SAndreas Sandberg 	return ret;
333063551ae0SDavid Gibson }
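
/*
 * Usage note (added for illustration): this is the hugetlb analogue of
 * copy_page_range() and runs during fork-time VMA duplication.  For a
 * private (cow) mapping the parent's PTE is write-protected and the
 * page's refcount/mapcount are raised, so the first write in either
 * process is resolved by hugetlb_cow() below.
 */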
333163551ae0SDavid Gibson 
333224669e58SAneesh Kumar K.V void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
333324669e58SAneesh Kumar K.V 			    unsigned long start, unsigned long end,
333424669e58SAneesh Kumar K.V 			    struct page *ref_page)
333563551ae0SDavid Gibson {
333663551ae0SDavid Gibson 	struct mm_struct *mm = vma->vm_mm;
333763551ae0SDavid Gibson 	unsigned long address;
3338c7546f8fSDavid Gibson 	pte_t *ptep;
333963551ae0SDavid Gibson 	pte_t pte;
3340cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
334163551ae0SDavid Gibson 	struct page *page;
3342a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
3343a5516438SAndi Kleen 	unsigned long sz = huge_page_size(h);
3344dff11abeSMike Kravetz 	unsigned long mmun_start = start;	/* For mmu_notifiers */
3345dff11abeSMike Kravetz 	unsigned long mmun_end   = end;		/* For mmu_notifiers */
3346a5516438SAndi Kleen 
334763551ae0SDavid Gibson 	WARN_ON(!is_vm_hugetlb_page(vma));
3348a5516438SAndi Kleen 	BUG_ON(start & ~huge_page_mask(h));
3349a5516438SAndi Kleen 	BUG_ON(end & ~huge_page_mask(h));
335063551ae0SDavid Gibson 
335107e32661SAneesh Kumar K.V 	/*
335207e32661SAneesh Kumar K.V 	 * This is a hugetlb vma, all the pte entries should point
335307e32661SAneesh Kumar K.V 	 * to huge page.
335407e32661SAneesh Kumar K.V 	 */
335507e32661SAneesh Kumar K.V 	tlb_remove_check_page_size_change(tlb, sz);
335624669e58SAneesh Kumar K.V 	tlb_start_vma(tlb, vma);
3357dff11abeSMike Kravetz 
3358dff11abeSMike Kravetz 	/*
3359dff11abeSMike Kravetz 	 * If sharing is possible, alert mmu notifiers of the worst case.
3360dff11abeSMike Kravetz 	 */
3361dff11abeSMike Kravetz 	adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
33622ec74c3eSSagi Grimberg 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3363569f48b8SHillf Danton 	address = start;
3364569f48b8SHillf Danton 	for (; address < end; address += sz) {
33657868a208SPunit Agrawal 		ptep = huge_pte_offset(mm, address, sz);
3366c7546f8fSDavid Gibson 		if (!ptep)
3367c7546f8fSDavid Gibson 			continue;
3368c7546f8fSDavid Gibson 
3369cb900f41SKirill A. Shutemov 		ptl = huge_pte_lock(h, mm, ptep);
337031d49da5SAneesh Kumar K.V 		if (huge_pmd_unshare(mm, &address, ptep)) {
337131d49da5SAneesh Kumar K.V 			spin_unlock(ptl);
3372dff11abeSMike Kravetz 			/*
3373dff11abeSMike Kravetz 			 * We just unmapped a page of PMDs by clearing a PUD.
3374dff11abeSMike Kravetz 			 * The caller's TLB flush range should cover this area.
3375dff11abeSMike Kravetz 			 */
337631d49da5SAneesh Kumar K.V 			continue;
337731d49da5SAneesh Kumar K.V 		}
337839dde65cSChen, Kenneth W 
33796629326bSHillf Danton 		pte = huge_ptep_get(ptep);
338031d49da5SAneesh Kumar K.V 		if (huge_pte_none(pte)) {
338131d49da5SAneesh Kumar K.V 			spin_unlock(ptl);
338231d49da5SAneesh Kumar K.V 			continue;
338331d49da5SAneesh Kumar K.V 		}
33846629326bSHillf Danton 
33856629326bSHillf Danton 		/*
33869fbc1f63SNaoya Horiguchi 		 * A migrating or HWPoisoned hugepage is already unmapped and
33879fbc1f63SNaoya Horiguchi 		 * its refcount is dropped, so just clear the pte here.
33886629326bSHillf Danton 		 */
33899fbc1f63SNaoya Horiguchi 		if (unlikely(!pte_present(pte))) {
33909386fac3SPunit Agrawal 			huge_pte_clear(mm, address, ptep, sz);
339131d49da5SAneesh Kumar K.V 			spin_unlock(ptl);
339231d49da5SAneesh Kumar K.V 			continue;
33938c4894c6SNaoya Horiguchi 		}
33946629326bSHillf Danton 
33956629326bSHillf Danton 		page = pte_page(pte);
339604f2cbe3SMel Gorman 		/*
339704f2cbe3SMel Gorman 		 * If a reference page is supplied, it is because a specific
339804f2cbe3SMel Gorman 		 * page is being unmapped, not a range. Ensure the page we
339904f2cbe3SMel Gorman 		 * are about to unmap is the actual page of interest.
340004f2cbe3SMel Gorman 		 */
340104f2cbe3SMel Gorman 		if (ref_page) {
340231d49da5SAneesh Kumar K.V 			if (page != ref_page) {
340331d49da5SAneesh Kumar K.V 				spin_unlock(ptl);
340431d49da5SAneesh Kumar K.V 				continue;
340531d49da5SAneesh Kumar K.V 			}
340604f2cbe3SMel Gorman 			/*
340704f2cbe3SMel Gorman 			 * Mark the VMA as having unmapped its page so that
340804f2cbe3SMel Gorman 			 * future faults in this VMA will fail rather than
340904f2cbe3SMel Gorman 			 * looking like data was lost
341004f2cbe3SMel Gorman 			 */
341104f2cbe3SMel Gorman 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
341204f2cbe3SMel Gorman 		}
341304f2cbe3SMel Gorman 
3414c7546f8fSDavid Gibson 		pte = huge_ptep_get_and_clear(mm, address, ptep);
3415b528e4b6SAneesh Kumar K.V 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3416106c992aSGerald Schaefer 		if (huge_pte_dirty(pte))
34176649a386SKen Chen 			set_page_dirty(page);
34189e81130bSHillf Danton 
34195d317b2bSNaoya Horiguchi 		hugetlb_count_sub(pages_per_huge_page(h), mm);
3420d281ee61SKirill A. Shutemov 		page_remove_rmap(page, true);
342131d49da5SAneesh Kumar K.V 
3422cb900f41SKirill A. Shutemov 		spin_unlock(ptl);
3423e77b0852SAneesh Kumar K.V 		tlb_remove_page_size(tlb, page, huge_page_size(h));
342424669e58SAneesh Kumar K.V 		/*
342531d49da5SAneesh Kumar K.V 		 * Bail out after unmapping reference page if supplied
342624669e58SAneesh Kumar K.V 		 */
342731d49da5SAneesh Kumar K.V 		if (ref_page)
342831d49da5SAneesh Kumar K.V 			break;
3429fe1668aeSChen, Kenneth W 	}
34302ec74c3eSSagi Grimberg 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
343124669e58SAneesh Kumar K.V 	tlb_end_vma(tlb, vma);
34321da177e4SLinus Torvalds }
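
/*
 * Note (added): with ref_page == NULL this tears down every huge PTE in
 * [start, end); with a ref_page it unmaps only that one page and bails
 * out, which is how unmap_ref_private() evicts a single page from the
 * other VMAs mapping the same file.
 */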
343363551ae0SDavid Gibson 
3434d833352aSMel Gorman void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3435d833352aSMel Gorman 			  struct vm_area_struct *vma, unsigned long start,
3436d833352aSMel Gorman 			  unsigned long end, struct page *ref_page)
3437d833352aSMel Gorman {
3438d833352aSMel Gorman 	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
3439d833352aSMel Gorman 
3440d833352aSMel Gorman 	/*
3441d833352aSMel Gorman 	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
3442d833352aSMel Gorman 	 * test will fail on a vma being torn down, and not grab a page table
3443d833352aSMel Gorman 	 * on its way out.  We're lucky that the flag has such an appropriate
3444d833352aSMel Gorman 	 * name, and can in fact be safely cleared here. We could clear it
3445d833352aSMel Gorman 	 * before the __unmap_hugepage_range above, but all that's necessary
3446c8c06efaSDavidlohr Bueso 	 * is to clear it before releasing the i_mmap_rwsem. This works
3447d833352aSMel Gorman 	 * because in the context this is called, the VMA is about to be
3448c8c06efaSDavidlohr Bueso 	 * destroyed and the i_mmap_rwsem is held.
3449d833352aSMel Gorman 	 */
3450d833352aSMel Gorman 	vma->vm_flags &= ~VM_MAYSHARE;
3451d833352aSMel Gorman }
3452d833352aSMel Gorman 
3453502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
345404f2cbe3SMel Gorman 			  unsigned long end, struct page *ref_page)
3455502717f4SChen, Kenneth W {
345624669e58SAneesh Kumar K.V 	struct mm_struct *mm;
345724669e58SAneesh Kumar K.V 	struct mmu_gather tlb;
3458dff11abeSMike Kravetz 	unsigned long tlb_start = start;
3459dff11abeSMike Kravetz 	unsigned long tlb_end = end;
3460dff11abeSMike Kravetz 
3461dff11abeSMike Kravetz 	/*
3462dff11abeSMike Kravetz 	 * If shared PMDs were possibly used within this vma range, adjust
3463dff11abeSMike Kravetz 	 * start/end for worst case tlb flushing.
3464dff11abeSMike Kravetz 	 * Note that we cannot be sure if PMDs are shared until we try to
3465dff11abeSMike Kravetz 	 * unmap pages.  However, we want to make sure TLB flushing covers
3466dff11abeSMike Kravetz 	 * the largest possible range.
3467dff11abeSMike Kravetz 	 */
3468dff11abeSMike Kravetz 	adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
346924669e58SAneesh Kumar K.V 
347024669e58SAneesh Kumar K.V 	mm = vma->vm_mm;
347124669e58SAneesh Kumar K.V 
3472dff11abeSMike Kravetz 	tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
347324669e58SAneesh Kumar K.V 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3474dff11abeSMike Kravetz 	tlb_finish_mmu(&tlb, tlb_start, tlb_end);
3475502717f4SChen, Kenneth W }
3476502717f4SChen, Kenneth W 
347704f2cbe3SMel Gorman /*
347804f2cbe3SMel Gorman  * This is called when the original mapper is failing to COW a MAP_PRIVATE
347904f2cbe3SMel Gorman  * mapping it owns the reserve page for. The intention is to unmap the page
348004f2cbe3SMel Gorman  * from other VMAs and let the children be SIGKILLed if they are faulting the
348104f2cbe3SMel Gorman  * same region.
348204f2cbe3SMel Gorman  */
34832f4612afSDavidlohr Bueso static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
34842a4b3dedSHarvey Harrison 			      struct page *page, unsigned long address)
348504f2cbe3SMel Gorman {
34867526674dSAdam Litke 	struct hstate *h = hstate_vma(vma);
348704f2cbe3SMel Gorman 	struct vm_area_struct *iter_vma;
348804f2cbe3SMel Gorman 	struct address_space *mapping;
348904f2cbe3SMel Gorman 	pgoff_t pgoff;
349004f2cbe3SMel Gorman 
349104f2cbe3SMel Gorman 	/*
349204f2cbe3SMel Gorman 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
349304f2cbe3SMel Gorman 	 * from page cache lookup which is in HPAGE_SIZE units.
349404f2cbe3SMel Gorman 	 */
34957526674dSAdam Litke 	address = address & huge_page_mask(h);
349636e4f20aSMichal Hocko 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
349736e4f20aSMichal Hocko 			vma->vm_pgoff;
349893c76a3dSAl Viro 	mapping = vma->vm_file->f_mapping;
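
	/*
	 * Worked example (added): with 4KB base pages and 2MB huge pages,
	 * address - vm_start == 4MB yields (4MB >> PAGE_SHIFT) == 1024
	 * PAGE_SIZE units added to vm_pgoff; the interval tree walked
	 * below is indexed in these small-page units.
	 */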
349904f2cbe3SMel Gorman 
35004eb2b1dcSMel Gorman 	/*
35014eb2b1dcSMel Gorman 	 * Take the mapping lock for the duration of the table walk. As
35024eb2b1dcSMel Gorman 	 * this mapping should be shared between all the VMAs,
35034eb2b1dcSMel Gorman 	 * __unmap_hugepage_range() is called as the lock is already held
35044eb2b1dcSMel Gorman 	 */
350583cde9e8SDavidlohr Bueso 	i_mmap_lock_write(mapping);
35066b2dbba8SMichel Lespinasse 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
350704f2cbe3SMel Gorman 		/* Do not unmap the current VMA */
350804f2cbe3SMel Gorman 		if (iter_vma == vma)
350904f2cbe3SMel Gorman 			continue;
351004f2cbe3SMel Gorman 
351104f2cbe3SMel Gorman 		/*
35122f84a899SMel Gorman 		 * Shared VMAs have their own reserves and do not affect
35132f84a899SMel Gorman 		 * MAP_PRIVATE accounting but it is possible that a shared
35142f84a899SMel Gorman 		 * VMA is using the same page so check and skip such VMAs.
35152f84a899SMel Gorman 		 */
35162f84a899SMel Gorman 		if (iter_vma->vm_flags & VM_MAYSHARE)
35172f84a899SMel Gorman 			continue;
35182f84a899SMel Gorman 
35192f84a899SMel Gorman 		/*
352004f2cbe3SMel Gorman 		 * Unmap the page from other VMAs without their own reserves.
352104f2cbe3SMel Gorman 		 * They get marked to be SIGKILLed if they fault in these
352204f2cbe3SMel Gorman 		 * areas. This is because a future no-page fault on this VMA
352304f2cbe3SMel Gorman 		 * could insert a zeroed page instead of the data existing
352404f2cbe3SMel Gorman 		 * from the time of fork. This would look like data corruption.
352504f2cbe3SMel Gorman 		 */
352604f2cbe3SMel Gorman 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
352724669e58SAneesh Kumar K.V 			unmap_hugepage_range(iter_vma, address,
352824669e58SAneesh Kumar K.V 					     address + huge_page_size(h), page);
352904f2cbe3SMel Gorman 	}
353083cde9e8SDavidlohr Bueso 	i_mmap_unlock_write(mapping);
353104f2cbe3SMel Gorman }
353204f2cbe3SMel Gorman 
35330fe6e20bSNaoya Horiguchi /*
35340fe6e20bSNaoya Horiguchi  * hugetlb_cow() should be called with the page lock of the original hugepage held.
3535ef009b25SMichal Hocko  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3536ef009b25SMichal Hocko  * cannot race with other handlers or page migration.
3537ef009b25SMichal Hocko  * Keep the pte_same checks anyway to make the transition from the mutex easier.
35380fe6e20bSNaoya Horiguchi  */
35392b740303SSouptick Joarder static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3540974e6d66SHuang Ying 		       unsigned long address, pte_t *ptep,
3541cb900f41SKirill A. Shutemov 		       struct page *pagecache_page, spinlock_t *ptl)
35421e8f889bSDavid Gibson {
35433999f52eSAneesh Kumar K.V 	pte_t pte;
3544a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
35451e8f889bSDavid Gibson 	struct page *old_page, *new_page;
35462b740303SSouptick Joarder 	int outside_reserve = 0;
35472b740303SSouptick Joarder 	vm_fault_t ret = 0;
35482ec74c3eSSagi Grimberg 	unsigned long mmun_start;	/* For mmu_notifiers */
35492ec74c3eSSagi Grimberg 	unsigned long mmun_end;		/* For mmu_notifiers */
3550974e6d66SHuang Ying 	unsigned long haddr = address & huge_page_mask(h);
35511e8f889bSDavid Gibson 
35523999f52eSAneesh Kumar K.V 	pte = huge_ptep_get(ptep);
35531e8f889bSDavid Gibson 	old_page = pte_page(pte);
35541e8f889bSDavid Gibson 
355504f2cbe3SMel Gorman retry_avoidcopy:
35561e8f889bSDavid Gibson 	/* If no-one else is actually using this page, avoid the copy
35571e8f889bSDavid Gibson 	 * and just make the page writable */
355837a2140dSJoonsoo Kim 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
35595a49973dSHugh Dickins 		page_move_anon_rmap(old_page, vma);
35605b7a1d40SHuang Ying 		set_huge_ptep_writable(vma, haddr, ptep);
356183c54070SNick Piggin 		return 0;
35621e8f889bSDavid Gibson 	}
35631e8f889bSDavid Gibson 
356404f2cbe3SMel Gorman 	/*
356504f2cbe3SMel Gorman 	 * If the process that created a MAP_PRIVATE mapping is about to
356604f2cbe3SMel Gorman 	 * perform a COW due to a shared page count, attempt to satisfy
356704f2cbe3SMel Gorman 	 * the allocation without using the existing reserves. The pagecache
356804f2cbe3SMel Gorman 	 * page is used to determine if the reserve at this address was
356904f2cbe3SMel Gorman 	 * consumed or not. If reserves were used, a partial faulted mapping
357004f2cbe3SMel Gorman 	 * at the time of fork() could consume its reserves on COW instead
357104f2cbe3SMel Gorman 	 * of the full address range.
357204f2cbe3SMel Gorman 	 */
35735944d011SJoonsoo Kim 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
357404f2cbe3SMel Gorman 			old_page != pagecache_page)
357504f2cbe3SMel Gorman 		outside_reserve = 1;
357604f2cbe3SMel Gorman 
357709cbfeafSKirill A. Shutemov 	get_page(old_page);
3578b76c8cfbSLarry Woodman 
3579ad4404a2SDavidlohr Bueso 	/*
3580ad4404a2SDavidlohr Bueso 	 * Drop page table lock as buddy allocator may be called. It will
3581ad4404a2SDavidlohr Bueso 	 * be acquired again before returning to the caller, as expected.
3582ad4404a2SDavidlohr Bueso 	 */
3583cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
35845b7a1d40SHuang Ying 	new_page = alloc_huge_page(vma, haddr, outside_reserve);
35851e8f889bSDavid Gibson 
35862fc39cecSAdam Litke 	if (IS_ERR(new_page)) {
358704f2cbe3SMel Gorman 		/*
358804f2cbe3SMel Gorman 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
358904f2cbe3SMel Gorman 		 * it is due to references held by a child and an insufficient
359004f2cbe3SMel Gorman 		 * huge page pool. To guarantee the original mappers
359104f2cbe3SMel Gorman 		 * reliability, unmap the page from child processes. The child
359204f2cbe3SMel Gorman 		 * may get SIGKILLed if it later faults.
359304f2cbe3SMel Gorman 		 */
359404f2cbe3SMel Gorman 		if (outside_reserve) {
359509cbfeafSKirill A. Shutemov 			put_page(old_page);
359604f2cbe3SMel Gorman 			BUG_ON(huge_pte_none(pte));
35975b7a1d40SHuang Ying 			unmap_ref_private(mm, vma, old_page, haddr);
359804f2cbe3SMel Gorman 			BUG_ON(huge_pte_none(pte));
3599cb900f41SKirill A. Shutemov 			spin_lock(ptl);
36005b7a1d40SHuang Ying 			ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3601a9af0c5dSNaoya Horiguchi 			if (likely(ptep &&
3602a9af0c5dSNaoya Horiguchi 				   pte_same(huge_ptep_get(ptep), pte)))
360304f2cbe3SMel Gorman 				goto retry_avoidcopy;
3604a734bcc8SHillf Danton 			/*
3605cb900f41SKirill A. Shutemov 			 * A race occurred while re-acquiring the page
3606cb900f41SKirill A. Shutemov 			 * table lock, and our job is done.
3607a734bcc8SHillf Danton 			 */
3608a734bcc8SHillf Danton 			return 0;
360904f2cbe3SMel Gorman 		}
361004f2cbe3SMel Gorman 
36112b740303SSouptick Joarder 		ret = vmf_error(PTR_ERR(new_page));
3612ad4404a2SDavidlohr Bueso 		goto out_release_old;
36131e8f889bSDavid Gibson 	}
36141e8f889bSDavid Gibson 
36150fe6e20bSNaoya Horiguchi 	/*
36160fe6e20bSNaoya Horiguchi 	 * When the original hugepage is shared one, it does not have
36170fe6e20bSNaoya Horiguchi 	 * anon_vma prepared.
36180fe6e20bSNaoya Horiguchi 	 */
361944e2aa93SDean Nelson 	if (unlikely(anon_vma_prepare(vma))) {
3620ad4404a2SDavidlohr Bueso 		ret = VM_FAULT_OOM;
3621ad4404a2SDavidlohr Bueso 		goto out_release_all;
362244e2aa93SDean Nelson 	}
36230fe6e20bSNaoya Horiguchi 
3624974e6d66SHuang Ying 	copy_user_huge_page(new_page, old_page, address, vma,
362547ad8475SAndrea Arcangeli 			    pages_per_huge_page(h));
36260ed361deSNick Piggin 	__SetPageUptodate(new_page);
3627bcc54222SNaoya Horiguchi 	set_page_huge_active(new_page);
36281e8f889bSDavid Gibson 
36295b7a1d40SHuang Ying 	mmun_start = haddr;
36302ec74c3eSSagi Grimberg 	mmun_end = mmun_start + huge_page_size(h);
36312ec74c3eSSagi Grimberg 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3632ad4404a2SDavidlohr Bueso 
3633b76c8cfbSLarry Woodman 	/*
3634cb900f41SKirill A. Shutemov 	 * Retake the page table lock to check for racing updates
3635b76c8cfbSLarry Woodman 	 * before the page tables are altered
3636b76c8cfbSLarry Woodman 	 */
3637cb900f41SKirill A. Shutemov 	spin_lock(ptl);
36385b7a1d40SHuang Ying 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3639a9af0c5dSNaoya Horiguchi 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
364007443a85SJoonsoo Kim 		ClearPagePrivate(new_page);
364107443a85SJoonsoo Kim 
36421e8f889bSDavid Gibson 		/* Break COW */
36435b7a1d40SHuang Ying 		huge_ptep_clear_flush(vma, haddr, ptep);
364434ee645eSJoerg Roedel 		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
36455b7a1d40SHuang Ying 		set_huge_pte_at(mm, haddr, ptep,
36461e8f889bSDavid Gibson 				make_huge_pte(vma, new_page, 1));
3647d281ee61SKirill A. Shutemov 		page_remove_rmap(old_page, true);
36485b7a1d40SHuang Ying 		hugepage_add_new_anon_rmap(new_page, vma, haddr);
36491e8f889bSDavid Gibson 		/* Make the old page be freed below */
36501e8f889bSDavid Gibson 		new_page = old_page;
36511e8f889bSDavid Gibson 	}
3652cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
36532ec74c3eSSagi Grimberg 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3654ad4404a2SDavidlohr Bueso out_release_all:
36555b7a1d40SHuang Ying 	restore_reserve_on_error(h, vma, haddr, new_page);
365609cbfeafSKirill A. Shutemov 	put_page(new_page);
3657ad4404a2SDavidlohr Bueso out_release_old:
365809cbfeafSKirill A. Shutemov 	put_page(old_page);
36598312034fSJoonsoo Kim 
3660ad4404a2SDavidlohr Bueso 	spin_lock(ptl); /* Caller expects lock to be held */
3661ad4404a2SDavidlohr Bueso 	return ret;
36621e8f889bSDavid Gibson }
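
/*
 * Flow sketch (added for illustration): if the copy target cannot be
 * allocated and outside_reserve is set, the reserve owner unmaps the
 * page from the child VMAs via unmap_ref_private() and jumps back to
 * retry_avoidcopy, where the page_mapcount() == 1 test can now succeed
 * and the copy is avoided entirely.
 */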
36631e8f889bSDavid Gibson 
366404f2cbe3SMel Gorman /* Return the pagecache page at a given address within a VMA */
3665a5516438SAndi Kleen static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3666a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long address)
366704f2cbe3SMel Gorman {
366804f2cbe3SMel Gorman 	struct address_space *mapping;
3669e7c4b0bfSAndy Whitcroft 	pgoff_t idx;
367004f2cbe3SMel Gorman 
367104f2cbe3SMel Gorman 	mapping = vma->vm_file->f_mapping;
3672a5516438SAndi Kleen 	idx = vma_hugecache_offset(h, vma, address);
367304f2cbe3SMel Gorman 
367404f2cbe3SMel Gorman 	return find_lock_page(mapping, idx);
367504f2cbe3SMel Gorman }
367604f2cbe3SMel Gorman 
36773ae77f43SHugh Dickins /*
36783ae77f43SHugh Dickins  * Return whether there is a pagecache page to back given address within VMA.
36793ae77f43SHugh Dickins  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
36803ae77f43SHugh Dickins  */
36813ae77f43SHugh Dickins static bool hugetlbfs_pagecache_present(struct hstate *h,
36822a15efc9SHugh Dickins 			struct vm_area_struct *vma, unsigned long address)
36832a15efc9SHugh Dickins {
36842a15efc9SHugh Dickins 	struct address_space *mapping;
36852a15efc9SHugh Dickins 	pgoff_t idx;
36862a15efc9SHugh Dickins 	struct page *page;
36872a15efc9SHugh Dickins 
36882a15efc9SHugh Dickins 	mapping = vma->vm_file->f_mapping;
36892a15efc9SHugh Dickins 	idx = vma_hugecache_offset(h, vma, address);
36902a15efc9SHugh Dickins 
36912a15efc9SHugh Dickins 	page = find_get_page(mapping, idx);
36922a15efc9SHugh Dickins 	if (page)
36932a15efc9SHugh Dickins 		put_page(page);
36942a15efc9SHugh Dickins 	return page != NULL;
36952a15efc9SHugh Dickins }
36962a15efc9SHugh Dickins 
3697ab76ad54SMike Kravetz int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3698ab76ad54SMike Kravetz 			   pgoff_t idx)
3699ab76ad54SMike Kravetz {
3700ab76ad54SMike Kravetz 	struct inode *inode = mapping->host;
3701ab76ad54SMike Kravetz 	struct hstate *h = hstate_inode(inode);
3702ab76ad54SMike Kravetz 	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3703ab76ad54SMike Kravetz 
3704ab76ad54SMike Kravetz 	if (err)
3705ab76ad54SMike Kravetz 		return err;
3706ab76ad54SMike Kravetz 	ClearPagePrivate(page);
3707ab76ad54SMike Kravetz 
370822146c3cSMike Kravetz 	/*
370922146c3cSMike Kravetz 	 * set page dirty so that it will not be removed from cache/file
371022146c3cSMike Kravetz 	 * by non-hugetlbfs specific code paths.
371122146c3cSMike Kravetz 	 */
371222146c3cSMike Kravetz 	set_page_dirty(page);
371322146c3cSMike Kravetz 
3714ab76ad54SMike Kravetz 	spin_lock(&inode->i_lock);
3715ab76ad54SMike Kravetz 	inode->i_blocks += blocks_per_huge_page(h);
3716ab76ad54SMike Kravetz 	spin_unlock(&inode->i_lock);
3717ab76ad54SMike Kravetz 	return 0;
3718ab76ad54SMike Kravetz }
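
/*
 * Accounting example (added): a successful insert bumps i_blocks by
 * blocks_per_huge_page(h), i.e. huge_page_size(h) / 512, so one 2MB
 * page accounts for 4096 512-byte blocks; this keeps st_blocks of
 * hugetlbfs files consistent with the pages actually instantiated.
 */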
3719ab76ad54SMike Kravetz 
37202b740303SSouptick Joarder static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
37212b740303SSouptick Joarder 			struct vm_area_struct *vma,
37228382d914SDavidlohr Bueso 			struct address_space *mapping, pgoff_t idx,
3723788c7df4SHugh Dickins 			unsigned long address, pte_t *ptep, unsigned int flags)
3724ac9b9c66SHugh Dickins {
3725a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
37262b740303SSouptick Joarder 	vm_fault_t ret = VM_FAULT_SIGBUS;
3727409eb8c2SHillf Danton 	int anon_rmap = 0;
37284c887265SAdam Litke 	unsigned long size;
37294c887265SAdam Litke 	struct page *page;
37301e8f889bSDavid Gibson 	pte_t new_pte;
3731cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
3732285b8dcaSHuang Ying 	unsigned long haddr = address & huge_page_mask(h);
37334c887265SAdam Litke 
373404f2cbe3SMel Gorman 	/*
373504f2cbe3SMel Gorman 	 * Currently, we are forced to kill the process in the event the
373604f2cbe3SMel Gorman 	 * original mapper has unmapped pages from the child due to a failed
373725985edcSLucas De Marchi 	 * COW. Warn that such a situation has occurred as it may not be obvious
373804f2cbe3SMel Gorman 	 */
373904f2cbe3SMel Gorman 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3740910154d5SGeoffrey Thomas 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
374104f2cbe3SMel Gorman 			   current->pid);
374204f2cbe3SMel Gorman 		return ret;
374304f2cbe3SMel Gorman 	}
374404f2cbe3SMel Gorman 
37454c887265SAdam Litke 	/*
37464c887265SAdam Litke 	 * Use page lock to guard against racing truncation
37474c887265SAdam Litke 	 * before we get page_table_lock.
37484c887265SAdam Litke 	 */
37496bda666aSChristoph Lameter retry:
37506bda666aSChristoph Lameter 	page = find_lock_page(mapping, idx);
37516bda666aSChristoph Lameter 	if (!page) {
3752a5516438SAndi Kleen 		size = i_size_read(mapping->host) >> huge_page_shift(h);
3753ebed4bfcSHugh Dickins 		if (idx >= size)
3754ebed4bfcSHugh Dickins 			goto out;
37551a1aad8aSMike Kravetz 
37561a1aad8aSMike Kravetz 		/*
37571a1aad8aSMike Kravetz 		 * Check for page in userfault range
37581a1aad8aSMike Kravetz 		 */
37591a1aad8aSMike Kravetz 		if (userfaultfd_missing(vma)) {
37601a1aad8aSMike Kravetz 			u32 hash;
37611a1aad8aSMike Kravetz 			struct vm_fault vmf = {
37621a1aad8aSMike Kravetz 				.vma = vma,
3763285b8dcaSHuang Ying 				.address = haddr,
37641a1aad8aSMike Kravetz 				.flags = flags,
37651a1aad8aSMike Kravetz 				/*
37661a1aad8aSMike Kravetz 				 * Hard to debug if it ends up being
37671a1aad8aSMike Kravetz 				 * used by a callee that assumes
37681a1aad8aSMike Kravetz 				 * something about the other
37691a1aad8aSMike Kravetz 				 * uninitialized fields... same as in
37701a1aad8aSMike Kravetz 				 * memory.c
37711a1aad8aSMike Kravetz 				 */
37721a1aad8aSMike Kravetz 			};
37731a1aad8aSMike Kravetz 
37741a1aad8aSMike Kravetz 			/*
37751a1aad8aSMike Kravetz 			 * hugetlb_fault_mutex must be dropped before
37761a1aad8aSMike Kravetz 			 * handling userfault.  Reacquire after handling
37771a1aad8aSMike Kravetz 			 * fault to make calling code simpler.
37781a1aad8aSMike Kravetz 			 */
37791a1aad8aSMike Kravetz 			hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
3780285b8dcaSHuang Ying 							idx, haddr);
37811a1aad8aSMike Kravetz 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
37821a1aad8aSMike Kravetz 			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
37831a1aad8aSMike Kravetz 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
37841a1aad8aSMike Kravetz 			goto out;
37851a1aad8aSMike Kravetz 		}
37861a1aad8aSMike Kravetz 
3787285b8dcaSHuang Ying 		page = alloc_huge_page(vma, haddr, 0);
37882fc39cecSAdam Litke 		if (IS_ERR(page)) {
37892b740303SSouptick Joarder 			ret = vmf_error(PTR_ERR(page));
37906bda666aSChristoph Lameter 			goto out;
37916bda666aSChristoph Lameter 		}
379247ad8475SAndrea Arcangeli 		clear_huge_page(page, address, pages_per_huge_page(h));
37930ed361deSNick Piggin 		__SetPageUptodate(page);
3794bcc54222SNaoya Horiguchi 		set_page_huge_active(page);
3795ac9b9c66SHugh Dickins 
3796f83a275dSMel Gorman 		if (vma->vm_flags & VM_MAYSHARE) {
3797ab76ad54SMike Kravetz 			int err = huge_add_to_page_cache(page, mapping, idx);
37986bda666aSChristoph Lameter 			if (err) {
37996bda666aSChristoph Lameter 				put_page(page);
38006bda666aSChristoph Lameter 				if (err == -EEXIST)
38016bda666aSChristoph Lameter 					goto retry;
38026bda666aSChristoph Lameter 				goto out;
38036bda666aSChristoph Lameter 			}
380423be7468SMel Gorman 		} else {
38056bda666aSChristoph Lameter 			lock_page(page);
38060fe6e20bSNaoya Horiguchi 			if (unlikely(anon_vma_prepare(vma))) {
38070fe6e20bSNaoya Horiguchi 				ret = VM_FAULT_OOM;
38080fe6e20bSNaoya Horiguchi 				goto backout_unlocked;
380923be7468SMel Gorman 			}
3810409eb8c2SHillf Danton 			anon_rmap = 1;
38110fe6e20bSNaoya Horiguchi 		}
38120fe6e20bSNaoya Horiguchi 	} else {
381357303d80SAndy Whitcroft 		/*
3814998b4382SNaoya Horiguchi 		 * If a memory error occurs between mmap() and fault, some processes
3815998b4382SNaoya Horiguchi 		 * don't have a hwpoisoned swap entry for the errored virtual address.
3816998b4382SNaoya Horiguchi 		 * So we need to block the hugepage fault by the PG_hwpoison bit check.
3817fd6a03edSNaoya Horiguchi 		 */
3818fd6a03edSNaoya Horiguchi 		if (unlikely(PageHWPoison(page))) {
3819aa50d3a7SAndi Kleen 			ret = VM_FAULT_HWPOISON |
3820972dc4deSAneesh Kumar K.V 				VM_FAULT_SET_HINDEX(hstate_index(h));
3821fd6a03edSNaoya Horiguchi 			goto backout_unlocked;
38226bda666aSChristoph Lameter 		}
3823998b4382SNaoya Horiguchi 	}
38241e8f889bSDavid Gibson 
382557303d80SAndy Whitcroft 	/*
382657303d80SAndy Whitcroft 	 * If we are going to COW a private mapping later, we examine the
382757303d80SAndy Whitcroft 	 * pending reservations for this page now. This will ensure that
382857303d80SAndy Whitcroft 	 * any allocations necessary to record that reservation occur outside
382957303d80SAndy Whitcroft 	 * the spinlock.
383057303d80SAndy Whitcroft 	 */
38315e911373SMike Kravetz 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3832285b8dcaSHuang Ying 		if (vma_needs_reservation(h, vma, haddr) < 0) {
38332b26736cSAndy Whitcroft 			ret = VM_FAULT_OOM;
38342b26736cSAndy Whitcroft 			goto backout_unlocked;
38352b26736cSAndy Whitcroft 		}
38365e911373SMike Kravetz 		/* Just decrements count, does not deallocate */
3837285b8dcaSHuang Ying 		vma_end_reservation(h, vma, haddr);
38385e911373SMike Kravetz 	}
383957303d80SAndy Whitcroft 
38408bea8052SAneesh Kumar K.V 	ptl = huge_pte_lock(h, mm, ptep);
3841a5516438SAndi Kleen 	size = i_size_read(mapping->host) >> huge_page_shift(h);
38424c887265SAdam Litke 	if (idx >= size)
38434c887265SAdam Litke 		goto backout;
38444c887265SAdam Litke 
384583c54070SNick Piggin 	ret = 0;
38467f2e9525SGerald Schaefer 	if (!huge_pte_none(huge_ptep_get(ptep)))
38474c887265SAdam Litke 		goto backout;
38484c887265SAdam Litke 
384907443a85SJoonsoo Kim 	if (anon_rmap) {
385007443a85SJoonsoo Kim 		ClearPagePrivate(page);
3851285b8dcaSHuang Ying 		hugepage_add_new_anon_rmap(page, vma, haddr);
3852ac714904SChoi Gi-yong 	} else
385353f9263bSKirill A. Shutemov 		page_dup_rmap(page, true);
38541e8f889bSDavid Gibson 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
38551e8f889bSDavid Gibson 				&& (vma->vm_flags & VM_SHARED)));
3856285b8dcaSHuang Ying 	set_huge_pte_at(mm, haddr, ptep, new_pte);
38571e8f889bSDavid Gibson 
38585d317b2bSNaoya Horiguchi 	hugetlb_count_add(pages_per_huge_page(h), mm);
3859788c7df4SHugh Dickins 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
38601e8f889bSDavid Gibson 		/* Optimization, do the COW without a second fault */
3861974e6d66SHuang Ying 		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
38621e8f889bSDavid Gibson 	}
38631e8f889bSDavid Gibson 
3864cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
38654c887265SAdam Litke 	unlock_page(page);
38664c887265SAdam Litke out:
3867ac9b9c66SHugh Dickins 	return ret;
38684c887265SAdam Litke 
38694c887265SAdam Litke backout:
3870cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
38712b26736cSAndy Whitcroft backout_unlocked:
38724c887265SAdam Litke 	unlock_page(page);
3873285b8dcaSHuang Ying 	restore_reserve_on_error(h, vma, haddr, page);
38744c887265SAdam Litke 	put_page(page);
38754c887265SAdam Litke 	goto out;
3876ac9b9c66SHugh Dickins }
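
/*
 * Note (added): i_size is checked twice above, once before allocating
 * and again under the page table lock, because a racing truncate may
 * shrink the file in between; the backout path then releases the new
 * page instead of mapping beyond EOF.
 */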
3877ac9b9c66SHugh Dickins 
38788382d914SDavidlohr Bueso #ifdef CONFIG_SMP
3879c672c7f2SMike Kravetz u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
38808382d914SDavidlohr Bueso 			    struct vm_area_struct *vma,
38818382d914SDavidlohr Bueso 			    struct address_space *mapping,
38828382d914SDavidlohr Bueso 			    pgoff_t idx, unsigned long address)
38838382d914SDavidlohr Bueso {
38848382d914SDavidlohr Bueso 	unsigned long key[2];
38858382d914SDavidlohr Bueso 	u32 hash;
38868382d914SDavidlohr Bueso 
38878382d914SDavidlohr Bueso 	if (vma->vm_flags & VM_SHARED) {
38888382d914SDavidlohr Bueso 		key[0] = (unsigned long) mapping;
38898382d914SDavidlohr Bueso 		key[1] = idx;
38908382d914SDavidlohr Bueso 	} else {
38918382d914SDavidlohr Bueso 		key[0] = (unsigned long) mm;
38928382d914SDavidlohr Bueso 		key[1] = address >> huge_page_shift(h);
38938382d914SDavidlohr Bueso 	}
38948382d914SDavidlohr Bueso 
38958382d914SDavidlohr Bueso 	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
38968382d914SDavidlohr Bueso 
38978382d914SDavidlohr Bueso 	return hash & (num_fault_mutexes - 1);
38988382d914SDavidlohr Bueso }
38998382d914SDavidlohr Bueso #else
39008382d914SDavidlohr Bueso /*
39018382d914SDavidlohr Bueso  * For uniprocessor systems we always use a single mutex, so just
39028382d914SDavidlohr Bueso  * return 0 and avoid the hashing overhead.
39038382d914SDavidlohr Bueso  */
3904c672c7f2SMike Kravetz u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
39058382d914SDavidlohr Bueso 			    struct vm_area_struct *vma,
39068382d914SDavidlohr Bueso 			    struct address_space *mapping,
39078382d914SDavidlohr Bueso 			    pgoff_t idx, unsigned long address)
39088382d914SDavidlohr Bueso {
39098382d914SDavidlohr Bueso 	return 0;
39108382d914SDavidlohr Bueso }
39118382d914SDavidlohr Bueso #endif
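
/*
 * Usage sketch (added for illustration): callers hash to one of the
 * num_fault_mutexes table slots and serialize on it, as hugetlb_fault()
 * does below:
 *
 *	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 *
 * Shared mappings key on (mapping, idx), so all processes faulting the
 * same file page contend on one mutex; private mappings key on the
 * (mm, address) pair instead.
 */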
39128382d914SDavidlohr Bueso 
39132b740303SSouptick Joarder vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3914788c7df4SHugh Dickins 			unsigned long address, unsigned int flags)
391586e5216fSAdam Litke {
39168382d914SDavidlohr Bueso 	pte_t *ptep, entry;
3917cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
39182b740303SSouptick Joarder 	vm_fault_t ret;
39198382d914SDavidlohr Bueso 	u32 hash;
39208382d914SDavidlohr Bueso 	pgoff_t idx;
39210fe6e20bSNaoya Horiguchi 	struct page *page = NULL;
392257303d80SAndy Whitcroft 	struct page *pagecache_page = NULL;
3923a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
39248382d914SDavidlohr Bueso 	struct address_space *mapping;
39250f792cf9SNaoya Horiguchi 	int need_wait_lock = 0;
3926285b8dcaSHuang Ying 	unsigned long haddr = address & huge_page_mask(h);
392786e5216fSAdam Litke 
3928285b8dcaSHuang Ying 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3929fd6a03edSNaoya Horiguchi 	if (ptep) {
3930fd6a03edSNaoya Horiguchi 		entry = huge_ptep_get(ptep);
3931290408d4SNaoya Horiguchi 		if (unlikely(is_hugetlb_entry_migration(entry))) {
3932cb900f41SKirill A. Shutemov 			migration_entry_wait_huge(vma, mm, ptep);
3933290408d4SNaoya Horiguchi 			return 0;
3934290408d4SNaoya Horiguchi 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3935aa50d3a7SAndi Kleen 			return VM_FAULT_HWPOISON_LARGE |
3936972dc4deSAneesh Kumar K.V 				VM_FAULT_SET_HINDEX(hstate_index(h));
39370d777df5SNaoya Horiguchi 	} else {
3938285b8dcaSHuang Ying 		ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
393986e5216fSAdam Litke 		if (!ptep)
394086e5216fSAdam Litke 			return VM_FAULT_OOM;
39410d777df5SNaoya Horiguchi 	}
394286e5216fSAdam Litke 
39438382d914SDavidlohr Bueso 	mapping = vma->vm_file->f_mapping;
3944285b8dcaSHuang Ying 	idx = vma_hugecache_offset(h, vma, haddr);
39458382d914SDavidlohr Bueso 
39463935baa9SDavid Gibson 	/*
39473935baa9SDavid Gibson 	 * Serialize hugepage allocation and instantiation, so that we don't
39483935baa9SDavid Gibson 	 * get spurious allocation failures if two CPUs race to instantiate
39493935baa9SDavid Gibson 	 * the same page in the page cache.
39503935baa9SDavid Gibson 	 */
3951285b8dcaSHuang Ying 	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
3952c672c7f2SMike Kravetz 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
39538382d914SDavidlohr Bueso 
39547f2e9525SGerald Schaefer 	entry = huge_ptep_get(ptep);
39557f2e9525SGerald Schaefer 	if (huge_pte_none(entry)) {
39568382d914SDavidlohr Bueso 		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3957b4d1d99fSDavid Gibson 		goto out_mutex;
39583935baa9SDavid Gibson 	}
395986e5216fSAdam Litke 
396083c54070SNick Piggin 	ret = 0;
39611e8f889bSDavid Gibson 
396257303d80SAndy Whitcroft 	/*
39630f792cf9SNaoya Horiguchi 	 * entry could be a migration/hwpoison entry at this point, so this
39640f792cf9SNaoya Horiguchi 	 * check prevents the kernel from going below assuming that we have
39650f792cf9SNaoya Horiguchi 	 * an active hugepage in the pagecache. This goto expects the 2nd page
39660f792cf9SNaoya Horiguchi 	 * fault, where the is_hugetlb_entry_(migration|hwpoisoned) check will
39670f792cf9SNaoya Horiguchi 	 * properly handle it.
39680f792cf9SNaoya Horiguchi 	 */
39690f792cf9SNaoya Horiguchi 	if (!pte_present(entry))
39700f792cf9SNaoya Horiguchi 		goto out_mutex;
39710f792cf9SNaoya Horiguchi 
39720f792cf9SNaoya Horiguchi 	/*
397357303d80SAndy Whitcroft 	 * If we are going to COW the mapping later, we examine the pending
397457303d80SAndy Whitcroft 	 * reservations for this page now. This will ensure that any
397557303d80SAndy Whitcroft 	 * allocations necessary to record that reservation occur outside the
397657303d80SAndy Whitcroft 	 * spinlock. For private mappings, we also lookup the pagecache
397757303d80SAndy Whitcroft 	 * page now as it is used to determine if a reservation has been
397857303d80SAndy Whitcroft 	 * consumed.
397957303d80SAndy Whitcroft 	 */
3980106c992aSGerald Schaefer 	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3981285b8dcaSHuang Ying 		if (vma_needs_reservation(h, vma, haddr) < 0) {
39822b26736cSAndy Whitcroft 			ret = VM_FAULT_OOM;
3983b4d1d99fSDavid Gibson 			goto out_mutex;
39842b26736cSAndy Whitcroft 		}
39855e911373SMike Kravetz 		/* Just decrements count, does not deallocate */
3986285b8dcaSHuang Ying 		vma_end_reservation(h, vma, haddr);
398757303d80SAndy Whitcroft 
3988f83a275dSMel Gorman 		if (!(vma->vm_flags & VM_MAYSHARE))
398957303d80SAndy Whitcroft 			pagecache_page = hugetlbfs_pagecache_page(h,
3990285b8dcaSHuang Ying 								vma, haddr);
399157303d80SAndy Whitcroft 	}
399257303d80SAndy Whitcroft 
39930f792cf9SNaoya Horiguchi 	ptl = huge_pte_lock(h, mm, ptep);
39940fe6e20bSNaoya Horiguchi 
39951e8f889bSDavid Gibson 	/* Check for a racing update before calling hugetlb_cow */
3996b4d1d99fSDavid Gibson 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3997cb900f41SKirill A. Shutemov 		goto out_ptl;
3998b4d1d99fSDavid Gibson 
39990f792cf9SNaoya Horiguchi 	/*
40000f792cf9SNaoya Horiguchi 	 * hugetlb_cow() requires page locks of pte_page(entry) and
40010f792cf9SNaoya Horiguchi 	 * pagecache_page, so here we need to take the former one
40020f792cf9SNaoya Horiguchi 	 * when page != pagecache_page or !pagecache_page.
40030f792cf9SNaoya Horiguchi 	 */
40040f792cf9SNaoya Horiguchi 	page = pte_page(entry);
40050f792cf9SNaoya Horiguchi 	if (page != pagecache_page)
40060f792cf9SNaoya Horiguchi 		if (!trylock_page(page)) {
40070f792cf9SNaoya Horiguchi 			need_wait_lock = 1;
40080f792cf9SNaoya Horiguchi 			goto out_ptl;
40090f792cf9SNaoya Horiguchi 		}
40100f792cf9SNaoya Horiguchi 
40110f792cf9SNaoya Horiguchi 	get_page(page);
4012b4d1d99fSDavid Gibson 
4013788c7df4SHugh Dickins 	if (flags & FAULT_FLAG_WRITE) {
4014106c992aSGerald Schaefer 		if (!huge_pte_write(entry)) {
4015974e6d66SHuang Ying 			ret = hugetlb_cow(mm, vma, address, ptep,
4016cb900f41SKirill A. Shutemov 					  pagecache_page, ptl);
40170f792cf9SNaoya Horiguchi 			goto out_put_page;
4018b4d1d99fSDavid Gibson 		}
4019106c992aSGerald Schaefer 		entry = huge_pte_mkdirty(entry);
4020b4d1d99fSDavid Gibson 	}
4021b4d1d99fSDavid Gibson 	entry = pte_mkyoung(entry);
4022285b8dcaSHuang Ying 	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4023788c7df4SHugh Dickins 						flags & FAULT_FLAG_WRITE))
4024285b8dcaSHuang Ying 		update_mmu_cache(vma, haddr, ptep);
40250f792cf9SNaoya Horiguchi out_put_page:
40260f792cf9SNaoya Horiguchi 	if (page != pagecache_page)
40270f792cf9SNaoya Horiguchi 		unlock_page(page);
40280f792cf9SNaoya Horiguchi 	put_page(page);
4029cb900f41SKirill A. Shutemov out_ptl:
4030cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
403157303d80SAndy Whitcroft 
403257303d80SAndy Whitcroft 	if (pagecache_page) {
403357303d80SAndy Whitcroft 		unlock_page(pagecache_page);
403457303d80SAndy Whitcroft 		put_page(pagecache_page);
403557303d80SAndy Whitcroft 	}
4036b4d1d99fSDavid Gibson out_mutex:
4037c672c7f2SMike Kravetz 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
40380f792cf9SNaoya Horiguchi 	/*
40390f792cf9SNaoya Horiguchi 	 * Generally it's safe to hold a refcount while waiting for the page
40400f792cf9SNaoya Horiguchi 	 * lock. But here we only wait to defer the next page fault and avoid
40410f792cf9SNaoya Horiguchi 	 * a busy loop; the page is not used after it is unlocked before this
40420f792cf9SNaoya Horiguchi 	 * page fault returns. So we are safe from accessing a freed page,
40430f792cf9SNaoya Horiguchi 	 * even though we wait here without taking a refcount.
40440f792cf9SNaoya Horiguchi 	 */
40450f792cf9SNaoya Horiguchi 	if (need_wait_lock)
40460f792cf9SNaoya Horiguchi 		wait_on_page_locked(page);
40471e8f889bSDavid Gibson 	return ret;
404886e5216fSAdam Litke }
404986e5216fSAdam Litke 
40508fb5debcSMike Kravetz /*
40518fb5debcSMike Kravetz  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
40528fb5debcSMike Kravetz  * modifications for huge pages.
40538fb5debcSMike Kravetz  */
40548fb5debcSMike Kravetz int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
40558fb5debcSMike Kravetz 			    pte_t *dst_pte,
40568fb5debcSMike Kravetz 			    struct vm_area_struct *dst_vma,
40578fb5debcSMike Kravetz 			    unsigned long dst_addr,
40588fb5debcSMike Kravetz 			    unsigned long src_addr,
40598fb5debcSMike Kravetz 			    struct page **pagep)
40608fb5debcSMike Kravetz {
40611e392147SAndrea Arcangeli 	struct address_space *mapping;
40621e392147SAndrea Arcangeli 	pgoff_t idx;
40631e392147SAndrea Arcangeli 	unsigned long size;
40641c9e8defSMike Kravetz 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
40658fb5debcSMike Kravetz 	struct hstate *h = hstate_vma(dst_vma);
40668fb5debcSMike Kravetz 	pte_t _dst_pte;
40678fb5debcSMike Kravetz 	spinlock_t *ptl;
40688fb5debcSMike Kravetz 	int ret;
40698fb5debcSMike Kravetz 	struct page *page;
40708fb5debcSMike Kravetz 
40718fb5debcSMike Kravetz 	if (!*pagep) {
40728fb5debcSMike Kravetz 		ret = -ENOMEM;
40738fb5debcSMike Kravetz 		page = alloc_huge_page(dst_vma, dst_addr, 0);
40748fb5debcSMike Kravetz 		if (IS_ERR(page))
40758fb5debcSMike Kravetz 			goto out;
40768fb5debcSMike Kravetz 
40778fb5debcSMike Kravetz 		ret = copy_huge_page_from_user(page,
40788fb5debcSMike Kravetz 						(const void __user *) src_addr,
4079810a56b9SMike Kravetz 						pages_per_huge_page(h), false);
40808fb5debcSMike Kravetz 
40818fb5debcSMike Kravetz 		/* fallback to copy_from_user outside mmap_sem */
40828fb5debcSMike Kravetz 		if (unlikely(ret)) {
40838fb5debcSMike Kravetz 			ret = -EFAULT;
40848fb5debcSMike Kravetz 			*pagep = page;
40858fb5debcSMike Kravetz 			/* don't free the page */
40868fb5debcSMike Kravetz 			goto out;
40878fb5debcSMike Kravetz 		}
40888fb5debcSMike Kravetz 	} else {
40898fb5debcSMike Kravetz 		page = *pagep;
40908fb5debcSMike Kravetz 		*pagep = NULL;
40918fb5debcSMike Kravetz 	}
40928fb5debcSMike Kravetz 
40938fb5debcSMike Kravetz 	/*
40948fb5debcSMike Kravetz 	 * The memory barrier inside __SetPageUptodate makes sure that
40958fb5debcSMike Kravetz 	 * preceding stores to the page contents become visible before
40968fb5debcSMike Kravetz 	 * the set_pte_at() write.
40978fb5debcSMike Kravetz 	 */
40988fb5debcSMike Kravetz 	__SetPageUptodate(page);
40998fb5debcSMike Kravetz 	set_page_huge_active(page);
41008fb5debcSMike Kravetz 
41011e392147SAndrea Arcangeli 	mapping = dst_vma->vm_file->f_mapping;
41021e392147SAndrea Arcangeli 	idx = vma_hugecache_offset(h, dst_vma, dst_addr);
41031e392147SAndrea Arcangeli 
41041c9e8defSMike Kravetz 	/*
41051c9e8defSMike Kravetz 	 * If shared, add to page cache
41061c9e8defSMike Kravetz 	 */
41071c9e8defSMike Kravetz 	if (vm_shared) {
41081e392147SAndrea Arcangeli 		size = i_size_read(mapping->host) >> huge_page_shift(h);
41091e392147SAndrea Arcangeli 		ret = -EFAULT;
41101e392147SAndrea Arcangeli 		if (idx >= size)
41111e392147SAndrea Arcangeli 			goto out_release_nounlock;
41121c9e8defSMike Kravetz 
41131e392147SAndrea Arcangeli 		/*
41141e392147SAndrea Arcangeli 		 * Serialization between remove_inode_hugepages() and
41151e392147SAndrea Arcangeli 		 * huge_add_to_page_cache() below happens through the
41161e392147SAndrea Arcangeli 		 * hugetlb_fault_mutex_table, which must be held by
41171e392147SAndrea Arcangeli 		 * the caller here.
41181e392147SAndrea Arcangeli 		 */
41191c9e8defSMike Kravetz 		ret = huge_add_to_page_cache(page, mapping, idx);
41201c9e8defSMike Kravetz 		if (ret)
41211c9e8defSMike Kravetz 			goto out_release_nounlock;
41221c9e8defSMike Kravetz 	}
41231c9e8defSMike Kravetz 
41248fb5debcSMike Kravetz 	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
41258fb5debcSMike Kravetz 	spin_lock(ptl);
41268fb5debcSMike Kravetz 
41271e392147SAndrea Arcangeli 	/*
41281e392147SAndrea Arcangeli 	 * Recheck the i_size after holding PT lock to make sure not
41291e392147SAndrea Arcangeli 	 * to leave any page mapped (as page_mapped()) beyond the end
41301e392147SAndrea Arcangeli 	 * of the i_size (remove_inode_hugepages() is strict about
41311e392147SAndrea Arcangeli 	 * enforcing that). If we bail out here, we'll also leave a
41321e392147SAndrea Arcangeli 	 * page in the radix tree in the vm_shared case beyond the end
41331e392147SAndrea Arcangeli 	 * of the i_size, but remove_inode_hugepages() will take care
41341e392147SAndrea Arcangeli 	 * of it as soon as we drop the hugetlb_fault_mutex_table.
41351e392147SAndrea Arcangeli 	 */
41361e392147SAndrea Arcangeli 	size = i_size_read(mapping->host) >> huge_page_shift(h);
41371e392147SAndrea Arcangeli 	ret = -EFAULT;
41381e392147SAndrea Arcangeli 	if (idx >= size)
41391e392147SAndrea Arcangeli 		goto out_release_unlock;
41401e392147SAndrea Arcangeli 
41418fb5debcSMike Kravetz 	ret = -EEXIST;
41428fb5debcSMike Kravetz 	if (!huge_pte_none(huge_ptep_get(dst_pte)))
41438fb5debcSMike Kravetz 		goto out_release_unlock;
41448fb5debcSMike Kravetz 
41451c9e8defSMike Kravetz 	if (vm_shared) {
41461c9e8defSMike Kravetz 		page_dup_rmap(page, true);
41471c9e8defSMike Kravetz 	} else {
41488fb5debcSMike Kravetz 		ClearPagePrivate(page);
41498fb5debcSMike Kravetz 		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
41501c9e8defSMike Kravetz 	}
41518fb5debcSMike Kravetz 
41528fb5debcSMike Kravetz 	_dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
41538fb5debcSMike Kravetz 	if (dst_vma->vm_flags & VM_WRITE)
41548fb5debcSMike Kravetz 		_dst_pte = huge_pte_mkdirty(_dst_pte);
41558fb5debcSMike Kravetz 	_dst_pte = pte_mkyoung(_dst_pte);
41568fb5debcSMike Kravetz 
41578fb5debcSMike Kravetz 	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
41588fb5debcSMike Kravetz 
41598fb5debcSMike Kravetz 	(void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
41608fb5debcSMike Kravetz 					dst_vma->vm_flags & VM_WRITE);
41618fb5debcSMike Kravetz 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
41628fb5debcSMike Kravetz 
41638fb5debcSMike Kravetz 	/* No need to invalidate - it was non-present before */
41648fb5debcSMike Kravetz 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
41658fb5debcSMike Kravetz 
41668fb5debcSMike Kravetz 	spin_unlock(ptl);
41671c9e8defSMike Kravetz 	if (vm_shared)
41681c9e8defSMike Kravetz 		unlock_page(page);
41698fb5debcSMike Kravetz 	ret = 0;
41708fb5debcSMike Kravetz out:
41718fb5debcSMike Kravetz 	return ret;
41728fb5debcSMike Kravetz out_release_unlock:
41738fb5debcSMike Kravetz 	spin_unlock(ptl);
41741c9e8defSMike Kravetz 	if (vm_shared)
41751c9e8defSMike Kravetz 		unlock_page(page);
41765af10dfdSAndrea Arcangeli out_release_nounlock:
41778fb5debcSMike Kravetz 	put_page(page);
41788fb5debcSMike Kravetz 	goto out;
41798fb5debcSMike Kravetz }
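
/*
 * A sketch of the lock ordering the function above relies on, summarized
 * from its comments (the hash index below is illustrative):
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);	   <- in the caller
 *	huge_add_to_page_cache(page, mapping, idx);	   <- vm_shared only;
 *							      locks the new page
 *	spin_lock(huge_pte_lockptr(h, dst_mm, dst_pte));
 *	... recheck i_size, install the huge PTE ...
 *	spin_unlock(ptl);
 *	unlock_page(page);				   <- vm_shared only
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);	   <- in the caller
 *
 * The i_size recheck must happen under the page table lock so that no PTE
 * beyond EOF can remain mapped once the fault mutex is dropped.
 */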
41808fb5debcSMike Kravetz 
418128a35716SMichel Lespinasse long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
418263551ae0SDavid Gibson 			 struct page **pages, struct vm_area_struct **vmas,
418328a35716SMichel Lespinasse 			 unsigned long *position, unsigned long *nr_pages,
418487ffc118SAndrea Arcangeli 			 long i, unsigned int flags, int *nonblocking)
418563551ae0SDavid Gibson {
4186d5d4b0aaSChen, Kenneth W 	unsigned long pfn_offset;
4187d5d4b0aaSChen, Kenneth W 	unsigned long vaddr = *position;
418828a35716SMichel Lespinasse 	unsigned long remainder = *nr_pages;
4189a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
41902be7cfedSDaniel Jordan 	int err = -EFAULT;
419163551ae0SDavid Gibson 
419263551ae0SDavid Gibson 	while (vaddr < vma->vm_end && remainder) {
419363551ae0SDavid Gibson 		pte_t *pte;
4194cb900f41SKirill A. Shutemov 		spinlock_t *ptl = NULL;
41952a15efc9SHugh Dickins 		int absent;
419663551ae0SDavid Gibson 		struct page *page;
419763551ae0SDavid Gibson 
41984c887265SAdam Litke 		/*
419902057967SDavid Rientjes 		 * If we have a pending SIGKILL, don't keep faulting pages and
420002057967SDavid Rientjes 		 * potentially allocating memory.
420102057967SDavid Rientjes 		 */
420202057967SDavid Rientjes 		if (unlikely(fatal_signal_pending(current))) {
420302057967SDavid Rientjes 			remainder = 0;
420402057967SDavid Rientjes 			break;
420502057967SDavid Rientjes 		}
420602057967SDavid Rientjes 
420702057967SDavid Rientjes 		/*
42084c887265SAdam Litke 		 * Some archs (sparc64, sh*) have multiple pte_ts per
42092a15efc9SHugh Dickins 		 * hugepage.  We have to make sure we get the
42104c887265SAdam Litke 		 * first, for the page indexing below to work.
4211cb900f41SKirill A. Shutemov 		 *
4212cb900f41SKirill A. Shutemov 		 * Note that page table lock is not held when pte is null.
42134c887265SAdam Litke 		 */
42147868a208SPunit Agrawal 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
42157868a208SPunit Agrawal 				      huge_page_size(h));
4216cb900f41SKirill A. Shutemov 		if (pte)
4217cb900f41SKirill A. Shutemov 			ptl = huge_pte_lock(h, mm, pte);
42182a15efc9SHugh Dickins 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
421963551ae0SDavid Gibson 
42202a15efc9SHugh Dickins 		/*
42212a15efc9SHugh Dickins 		 * When coredumping, it suits get_dump_page if we just return
42223ae77f43SHugh Dickins 		 * an error where there's an empty slot with no huge pagecache
42233ae77f43SHugh Dickins 		 * to back it.  This way, we avoid allocating a hugepage, and
42243ae77f43SHugh Dickins 		 * the sparse dumpfile avoids allocating disk blocks, but its
42253ae77f43SHugh Dickins 		 * huge holes still show up with zeroes where they need to be.
42262a15efc9SHugh Dickins 		 */
42273ae77f43SHugh Dickins 		if (absent && (flags & FOLL_DUMP) &&
42283ae77f43SHugh Dickins 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4229cb900f41SKirill A. Shutemov 			if (pte)
4230cb900f41SKirill A. Shutemov 				spin_unlock(ptl);
42312a15efc9SHugh Dickins 			remainder = 0;
42322a15efc9SHugh Dickins 			break;
42332a15efc9SHugh Dickins 		}
42342a15efc9SHugh Dickins 
42359cc3a5bdSNaoya Horiguchi 		/*
42369cc3a5bdSNaoya Horiguchi 		 * We need to call hugetlb_fault for both hugepages under
42379cc3a5bdSNaoya Horiguchi 		 * migration (in which case hugetlb_fault waits for the
42389cc3a5bdSNaoya Horiguchi 		 * migration) and hwpoisoned hugepages (in which case we need
42399cc3a5bdSNaoya Horiguchi 		 * to prevent the caller from accessing them). To do this, we
42409cc3a5bdSNaoya Horiguchi 		 * use is_swap_pte here instead of is_hugetlb_entry_migration
42419cc3a5bdSNaoya Horiguchi 		 * and is_hugetlb_entry_hwpoisoned: it covers both cases, and
42429cc3a5bdSNaoya Horiguchi 		 * we can't follow correct pages directly from any kind of
42439cc3a5bdSNaoya Horiguchi 		 * swap entry anyway.
42449cc3a5bdSNaoya Horiguchi 		 */
42459cc3a5bdSNaoya Horiguchi 		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4246106c992aSGerald Schaefer 		    ((flags & FOLL_WRITE) &&
4247106c992aSGerald Schaefer 		      !huge_pte_write(huge_ptep_get(pte)))) {
42482b740303SSouptick Joarder 			vm_fault_t ret;
424987ffc118SAndrea Arcangeli 			unsigned int fault_flags = 0;
42504c887265SAdam Litke 
4251cb900f41SKirill A. Shutemov 			if (pte)
4252cb900f41SKirill A. Shutemov 				spin_unlock(ptl);
425387ffc118SAndrea Arcangeli 			if (flags & FOLL_WRITE)
425487ffc118SAndrea Arcangeli 				fault_flags |= FAULT_FLAG_WRITE;
425587ffc118SAndrea Arcangeli 			if (nonblocking)
425687ffc118SAndrea Arcangeli 				fault_flags |= FAULT_FLAG_ALLOW_RETRY;
425787ffc118SAndrea Arcangeli 			if (flags & FOLL_NOWAIT)
425887ffc118SAndrea Arcangeli 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
425987ffc118SAndrea Arcangeli 					FAULT_FLAG_RETRY_NOWAIT;
426087ffc118SAndrea Arcangeli 			if (flags & FOLL_TRIED) {
426187ffc118SAndrea Arcangeli 				VM_WARN_ON_ONCE(fault_flags &
426287ffc118SAndrea Arcangeli 						FAULT_FLAG_ALLOW_RETRY);
426387ffc118SAndrea Arcangeli 				fault_flags |= FAULT_FLAG_TRIED;
426487ffc118SAndrea Arcangeli 			}
426587ffc118SAndrea Arcangeli 			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
426687ffc118SAndrea Arcangeli 			if (ret & VM_FAULT_ERROR) {
42672be7cfedSDaniel Jordan 				err = vm_fault_to_errno(ret, flags);
42681c59827dSHugh Dickins 				remainder = 0;
42691c59827dSHugh Dickins 				break;
42701c59827dSHugh Dickins 			}
427187ffc118SAndrea Arcangeli 			if (ret & VM_FAULT_RETRY) {
427287ffc118SAndrea Arcangeli 				if (nonblocking)
427387ffc118SAndrea Arcangeli 					*nonblocking = 0;
427487ffc118SAndrea Arcangeli 				*nr_pages = 0;
427587ffc118SAndrea Arcangeli 				/*
427687ffc118SAndrea Arcangeli 				 * VM_FAULT_RETRY must not return an
427787ffc118SAndrea Arcangeli 				 * error, it will return zero
427887ffc118SAndrea Arcangeli 				 * error; it will return zero
427987ffc118SAndrea Arcangeli 				 *
428087ffc118SAndrea Arcangeli 				 * No need to update "position" as the
428187ffc118SAndrea Arcangeli 				 * caller will not check it after
428287ffc118SAndrea Arcangeli 				 * *nr_pages is set to 0.
428387ffc118SAndrea Arcangeli 				 */
428487ffc118SAndrea Arcangeli 				return i;
428587ffc118SAndrea Arcangeli 			}
428687ffc118SAndrea Arcangeli 			continue;
428787ffc118SAndrea Arcangeli 		}
428863551ae0SDavid Gibson 
4289a5516438SAndi Kleen 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
42907f2e9525SGerald Schaefer 		page = pte_page(huge_ptep_get(pte));
4291d5d4b0aaSChen, Kenneth W same_page:
4292d6692183SChen, Kenneth W 		if (pages) {
429369d177c2SAndy Whitcroft 			pages[i] = mem_map_offset(page, pfn_offset);
4294ddc58f27SKirill A. Shutemov 			get_page(pages[i]);
4295d6692183SChen, Kenneth W 		}
429663551ae0SDavid Gibson 
429763551ae0SDavid Gibson 		if (vmas)
429863551ae0SDavid Gibson 			vmas[i] = vma;
429963551ae0SDavid Gibson 
430063551ae0SDavid Gibson 		vaddr += PAGE_SIZE;
4301d5d4b0aaSChen, Kenneth W 		++pfn_offset;
430263551ae0SDavid Gibson 		--remainder;
430363551ae0SDavid Gibson 		++i;
4304d5d4b0aaSChen, Kenneth W 		if (vaddr < vma->vm_end && remainder &&
4305a5516438SAndi Kleen 				pfn_offset < pages_per_huge_page(h)) {
4306d5d4b0aaSChen, Kenneth W 			/*
4307d5d4b0aaSChen, Kenneth W 			 * We use pfn_offset to avoid touching the pageframes
4308d5d4b0aaSChen, Kenneth W 			 * of this compound page.
4309d5d4b0aaSChen, Kenneth W 			 */
4310d5d4b0aaSChen, Kenneth W 			goto same_page;
4311d5d4b0aaSChen, Kenneth W 		}
4312cb900f41SKirill A. Shutemov 		spin_unlock(ptl);
431363551ae0SDavid Gibson 	}
431428a35716SMichel Lespinasse 	*nr_pages = remainder;
431587ffc118SAndrea Arcangeli 	/*
431687ffc118SAndrea Arcangeli 	 * Setting position is actually required only if remainder is
431787ffc118SAndrea Arcangeli 	 * not zero, but it's faster not to add an "if (remainder)"
431887ffc118SAndrea Arcangeli 	 * branch.
431987ffc118SAndrea Arcangeli 	 */
432063551ae0SDavid Gibson 	*position = vaddr;
432163551ae0SDavid Gibson 
43222be7cfedSDaniel Jordan 	return i ? i : err;
432363551ae0SDavid Gibson }
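
/*
 * A hypothetical GUP-style caller of follow_hugetlb_page(), shown only to
 * illustrate the in/out contract of position/nr_pages; the real caller is
 * the generic get_user_pages() path in mm/gup.c:
 */
#if 0	/* illustrative sketch, not built */
static long example_follow(struct mm_struct *mm, struct vm_area_struct *vma,
			   unsigned long start, unsigned long nr,
			   struct page **pages)
{
	/* A reference is taken on each page whenever pages != NULL. */
	long done = follow_hugetlb_page(mm, vma, pages, NULL, &start, &nr,
					0, FOLL_WRITE, NULL);

	/*
	 * On return, done is the number of pages handled (or -errno when
	 * nothing was handled), start points past the last page processed
	 * and nr holds the remainder, so callers can loop until nr is 0.
	 */
	return done;
}
#endif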
43248f860591SZhang, Yanmin 
43255491ae7bSAneesh Kumar K.V #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
43265491ae7bSAneesh Kumar K.V /*
43275491ae7bSAneesh Kumar K.V  * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
43285491ae7bSAneesh Kumar K.V  * implement this.
43295491ae7bSAneesh Kumar K.V  */
43305491ae7bSAneesh Kumar K.V #define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
43315491ae7bSAneesh Kumar K.V #endif
43325491ae7bSAneesh Kumar K.V 
43337da4d641SPeter Zijlstra unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
43348f860591SZhang, Yanmin 		unsigned long address, unsigned long end, pgprot_t newprot)
43358f860591SZhang, Yanmin {
43368f860591SZhang, Yanmin 	struct mm_struct *mm = vma->vm_mm;
43378f860591SZhang, Yanmin 	unsigned long start = address;
43388f860591SZhang, Yanmin 	pte_t *ptep;
43398f860591SZhang, Yanmin 	pte_t pte;
4340a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
43417da4d641SPeter Zijlstra 	unsigned long pages = 0;
4342dff11abeSMike Kravetz 	unsigned long f_start = start;
4343dff11abeSMike Kravetz 	unsigned long f_end = end;
4344dff11abeSMike Kravetz 	bool shared_pmd = false;
4345dff11abeSMike Kravetz 
4346dff11abeSMike Kravetz 	/*
4347dff11abeSMike Kravetz 	 * In the case of shared PMDs, the area to flush could be beyond
4348dff11abeSMike Kravetz 	 * start/end.  Set f_start/f_end to cover the maximum possible
4349dff11abeSMike Kravetz 	 * range if PMD sharing is possible.
4350dff11abeSMike Kravetz 	 */
4351dff11abeSMike Kravetz 	adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);
43528f860591SZhang, Yanmin 
43538f860591SZhang, Yanmin 	BUG_ON(address >= end);
4354dff11abeSMike Kravetz 	flush_cache_range(vma, f_start, f_end);
43558f860591SZhang, Yanmin 
4356dff11abeSMike Kravetz 	mmu_notifier_invalidate_range_start(mm, f_start, f_end);
435783cde9e8SDavidlohr Bueso 	i_mmap_lock_write(vma->vm_file->f_mapping);
4358a5516438SAndi Kleen 	for (; address < end; address += huge_page_size(h)) {
4359cb900f41SKirill A. Shutemov 		spinlock_t *ptl;
43607868a208SPunit Agrawal 		ptep = huge_pte_offset(mm, address, huge_page_size(h));
43618f860591SZhang, Yanmin 		if (!ptep)
43628f860591SZhang, Yanmin 			continue;
4363cb900f41SKirill A. Shutemov 		ptl = huge_pte_lock(h, mm, ptep);
43647da4d641SPeter Zijlstra 		if (huge_pmd_unshare(mm, &address, ptep)) {
43657da4d641SPeter Zijlstra 			pages++;
4366cb900f41SKirill A. Shutemov 			spin_unlock(ptl);
4367dff11abeSMike Kravetz 			shared_pmd = true;
436839dde65cSChen, Kenneth W 			continue;
43697da4d641SPeter Zijlstra 		}
4370a8bda28dSNaoya Horiguchi 		pte = huge_ptep_get(ptep);
4371a8bda28dSNaoya Horiguchi 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4372a8bda28dSNaoya Horiguchi 			spin_unlock(ptl);
4373a8bda28dSNaoya Horiguchi 			continue;
4374a8bda28dSNaoya Horiguchi 		}
4375a8bda28dSNaoya Horiguchi 		if (unlikely(is_hugetlb_entry_migration(pte))) {
4376a8bda28dSNaoya Horiguchi 			swp_entry_t entry = pte_to_swp_entry(pte);
4377a8bda28dSNaoya Horiguchi 
4378a8bda28dSNaoya Horiguchi 			if (is_write_migration_entry(entry)) {
4379a8bda28dSNaoya Horiguchi 				pte_t newpte;
4380a8bda28dSNaoya Horiguchi 
4381a8bda28dSNaoya Horiguchi 				make_migration_entry_read(&entry);
4382a8bda28dSNaoya Horiguchi 				newpte = swp_entry_to_pte(entry);
4383e5251fd4SPunit Agrawal 				set_huge_swap_pte_at(mm, address, ptep,
4384e5251fd4SPunit Agrawal 						     newpte, huge_page_size(h));
4385a8bda28dSNaoya Horiguchi 				pages++;
4386a8bda28dSNaoya Horiguchi 			}
4387a8bda28dSNaoya Horiguchi 			spin_unlock(ptl);
4388a8bda28dSNaoya Horiguchi 			continue;
4389a8bda28dSNaoya Horiguchi 		}
4390a8bda28dSNaoya Horiguchi 		if (!huge_pte_none(pte)) {
43918f860591SZhang, Yanmin 			pte = huge_ptep_get_and_clear(mm, address, ptep);
4392106c992aSGerald Schaefer 			pte = pte_mkhuge(huge_pte_modify(pte, newprot));
4393be7517d6STony Lu 			pte = arch_make_huge_pte(pte, vma, NULL, 0);
43948f860591SZhang, Yanmin 			set_huge_pte_at(mm, address, ptep, pte);
43957da4d641SPeter Zijlstra 			pages++;
43968f860591SZhang, Yanmin 		}
4397cb900f41SKirill A. Shutemov 		spin_unlock(ptl);
43988f860591SZhang, Yanmin 	}
4399d833352aSMel Gorman 	/*
4400c8c06efaSDavidlohr Bueso 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4401d833352aSMel Gorman 	 * may have cleared our pud entry and done put_page on the page table:
4402c8c06efaSDavidlohr Bueso 	 * once we release i_mmap_rwsem, another task can do the final put_page
4403dff11abeSMike Kravetz 	 * and that page table may then be reused and filled with junk.  If we actually
4404dff11abeSMike Kravetz 	 * did unshare a page of pmds, flush the range corresponding to the pud.
4405d833352aSMel Gorman 	 */
4406dff11abeSMike Kravetz 	if (shared_pmd)
4407dff11abeSMike Kravetz 		flush_hugetlb_tlb_range(vma, f_start, f_end);
4408dff11abeSMike Kravetz 	else
44095491ae7bSAneesh Kumar K.V 		flush_hugetlb_tlb_range(vma, start, end);
44100f10851eSJérôme Glisse 	/*
44110f10851eSJérôme Glisse 	 * No need to call mmu_notifier_invalidate_range(): we are downgrading
44120f10851eSJérôme Glisse 	 * page table protection, not changing it to point to a new page.
44130f10851eSJérôme Glisse 	 *
4414ad56b738SMike Rapoport 	 * See Documentation/vm/mmu_notifier.rst
44150f10851eSJérôme Glisse 	 */
441683cde9e8SDavidlohr Bueso 	i_mmap_unlock_write(vma->vm_file->f_mapping);
4417dff11abeSMike Kravetz 	mmu_notifier_invalidate_range_end(mm, f_start, f_end);
44187da4d641SPeter Zijlstra 
44197da4d641SPeter Zijlstra 	return pages << h->order;
44208f860591SZhang, Yanmin }
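
/*
 * Example of the return value above: an mprotect() that changes the
 * protection of a single 2MB huge page returns 512 base pages here
 * (pages == 1 shifted by h->order == 9 for the 2MB hstate).
 */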
44218f860591SZhang, Yanmin 
4422a1e78772SMel Gorman int hugetlb_reserve_pages(struct inode *inode,
4423a1e78772SMel Gorman 					long from, long to,
44245a6fe125SMel Gorman 					struct vm_area_struct *vma,
4425ca16d140SKOSAKI Motohiro 					vm_flags_t vm_flags)
4426e4e574b7SAdam Litke {
442717c9d12eSMel Gorman 	long ret, chg;
4428a5516438SAndi Kleen 	struct hstate *h = hstate_inode(inode);
442990481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_inode(inode);
44309119a41eSJoonsoo Kim 	struct resv_map *resv_map;
44311c5ecae3SMike Kravetz 	long gbl_reserve;
4432e4e574b7SAdam Litke 
443363489f8eSMike Kravetz 	/* This should never happen */
443463489f8eSMike Kravetz 	if (from > to) {
443563489f8eSMike Kravetz 		VM_WARN(1, "%s called with a negative range\n", __func__);
443663489f8eSMike Kravetz 		return -EINVAL;
443763489f8eSMike Kravetz 	}
443863489f8eSMike Kravetz 
4439a1e78772SMel Gorman 	/*
444017c9d12eSMel Gorman 	 * Only apply hugepage reservation if asked. At fault time, a
444117c9d12eSMel Gorman 	 * VM_NORESERVE mapping will attempt to allocate a page
444290481622SDavid Gibson 	 * without using reserves.
444317c9d12eSMel Gorman 	 */
4444ca16d140SKOSAKI Motohiro 	if (vm_flags & VM_NORESERVE)
444517c9d12eSMel Gorman 		return 0;
444617c9d12eSMel Gorman 
444717c9d12eSMel Gorman 	/*
4448a1e78772SMel Gorman 	 * Shared mappings base their reservation on the number of pages that
4449a1e78772SMel Gorman 	 * are already allocated on behalf of the file. Private mappings need
4450a1e78772SMel Gorman 	 * to reserve the full area even if read-only as mprotect() may be
4451a1e78772SMel Gorman 	 * called to make the mapping read-write. Assume !vma is a shm mapping
4452a1e78772SMel Gorman 	 */
44539119a41eSJoonsoo Kim 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
44544e35f483SJoonsoo Kim 		resv_map = inode_resv_map(inode);
44559119a41eSJoonsoo Kim 
44561406ec9bSJoonsoo Kim 		chg = region_chg(resv_map, from, to);
44579119a41eSJoonsoo Kim 
44589119a41eSJoonsoo Kim 	} else {
44599119a41eSJoonsoo Kim 		resv_map = resv_map_alloc();
44605a6fe125SMel Gorman 		if (!resv_map)
44615a6fe125SMel Gorman 			return -ENOMEM;
44625a6fe125SMel Gorman 
446317c9d12eSMel Gorman 		chg = to - from;
446417c9d12eSMel Gorman 
44655a6fe125SMel Gorman 		set_vma_resv_map(vma, resv_map);
44665a6fe125SMel Gorman 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
44675a6fe125SMel Gorman 	}
44685a6fe125SMel Gorman 
4469c50ac050SDave Hansen 	if (chg < 0) {
4470c50ac050SDave Hansen 		ret = chg;
4471c50ac050SDave Hansen 		goto out_err;
4472c50ac050SDave Hansen 	}
447317c9d12eSMel Gorman 
44741c5ecae3SMike Kravetz 	/*
44751c5ecae3SMike Kravetz 	 * There must be enough pages in the subpool for the mapping. If
44761c5ecae3SMike Kravetz 	 * the subpool has a minimum size, there may be some global
44771c5ecae3SMike Kravetz 	 * reservations already in place (gbl_reserve).
44781c5ecae3SMike Kravetz 	 */
44791c5ecae3SMike Kravetz 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
44801c5ecae3SMike Kravetz 	if (gbl_reserve < 0) {
4481c50ac050SDave Hansen 		ret = -ENOSPC;
4482c50ac050SDave Hansen 		goto out_err;
4483c50ac050SDave Hansen 	}
448417c9d12eSMel Gorman 
448517c9d12eSMel Gorman 	/*
448617c9d12eSMel Gorman 	 * Check that enough hugepages are available for the reservation.
448790481622SDavid Gibson 	 * Hand the pages back to the subpool if there are not enough.
448817c9d12eSMel Gorman 	 */
44891c5ecae3SMike Kravetz 	ret = hugetlb_acct_memory(h, gbl_reserve);
449017c9d12eSMel Gorman 	if (ret < 0) {
44911c5ecae3SMike Kravetz 		/* put back original number of pages, chg */
44921c5ecae3SMike Kravetz 		(void)hugepage_subpool_put_pages(spool, chg);
4493c50ac050SDave Hansen 		goto out_err;
449417c9d12eSMel Gorman 	}
449517c9d12eSMel Gorman 
449617c9d12eSMel Gorman 	/*
449717c9d12eSMel Gorman 	 * Account for the reservations made. Shared mappings record regions
449817c9d12eSMel Gorman 	 * that have reservations as they are shared by multiple VMAs.
449917c9d12eSMel Gorman 	 * When the last VMA disappears, the region map says how much
450017c9d12eSMel Gorman 	 * the reservation was and the page cache tells how much of
450117c9d12eSMel Gorman 	 * the reservation was consumed. Private mappings are per-VMA and
450217c9d12eSMel Gorman 	 * only the consumed reservations are tracked. When the VMA
450317c9d12eSMel Gorman 	 * disappears, the original reservation is the VMA size and the
450417c9d12eSMel Gorman 	 * consumed reservations are stored in the map. Hence, nothing
450517c9d12eSMel Gorman 	 * else has to be done for private mappings here
450617c9d12eSMel Gorman 	 */
450733039678SMike Kravetz 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
450833039678SMike Kravetz 		long add = region_add(resv_map, from, to);
450933039678SMike Kravetz 
451033039678SMike Kravetz 		if (unlikely(chg > add)) {
451133039678SMike Kravetz 			/*
451233039678SMike Kravetz 			 * pages in this range were added to the reserve
451333039678SMike Kravetz 			 * map between region_chg and region_add.  This
451433039678SMike Kravetz 			 * indicates a race with alloc_huge_page.  Adjust
451533039678SMike Kravetz 			 * the subpool and reserve counts modified above
451633039678SMike Kravetz 			 * based on the difference.
451733039678SMike Kravetz 			 */
451833039678SMike Kravetz 			long rsv_adjust;
451933039678SMike Kravetz 
452033039678SMike Kravetz 			rsv_adjust = hugepage_subpool_put_pages(spool,
452133039678SMike Kravetz 								chg - add);
452233039678SMike Kravetz 			hugetlb_acct_memory(h, -rsv_adjust);
452333039678SMike Kravetz 		}
452433039678SMike Kravetz 	}
4525a43a8c39SChen, Kenneth W 	return 0;
4526c50ac050SDave Hansen out_err:
45275e911373SMike Kravetz 	if (!vma || vma->vm_flags & VM_MAYSHARE)
4528ff8c0c53SMike Kravetz 		/* Don't call region_abort if region_chg failed */
4529ff8c0c53SMike Kravetz 		if (chg >= 0)
45305e911373SMike Kravetz 			region_abort(resv_map, from, to);
4531f031dd27SJoonsoo Kim 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4532f031dd27SJoonsoo Kim 		kref_put(&resv_map->refs, resv_map_release);
4533c50ac050SDave Hansen 	return ret;
4534a43a8c39SChen, Kenneth W }
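
/*
 * A sketch of how an mmap() of a hugetlbfs file might reserve its range;
 * this helper is hypothetical (the real call site lives in fs/hugetlbfs),
 * but it shows that from/to are in units of huge pages:
 */
#if 0	/* illustrative sketch, not built */
static int example_mmap_reserve(struct inode *inode, struct vm_area_struct *vma)
{
	struct hstate *h = hstate_inode(inode);
	long from = vma->vm_pgoff >> huge_page_order(h);
	long to = from + ((vma->vm_end - vma->vm_start) >> huge_page_shift(h));

	return hugetlb_reserve_pages(inode, from, to, vma, vma->vm_flags);
}
#endif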
4535a43a8c39SChen, Kenneth W 
4536b5cec28dSMike Kravetz long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4537b5cec28dSMike Kravetz 								long freed)
4538a43a8c39SChen, Kenneth W {
4539a5516438SAndi Kleen 	struct hstate *h = hstate_inode(inode);
45404e35f483SJoonsoo Kim 	struct resv_map *resv_map = inode_resv_map(inode);
45419119a41eSJoonsoo Kim 	long chg = 0;
454290481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_inode(inode);
45431c5ecae3SMike Kravetz 	long gbl_reserve;
454445c682a6SKen Chen 
4545b5cec28dSMike Kravetz 	if (resv_map) {
4546b5cec28dSMike Kravetz 		chg = region_del(resv_map, start, end);
4547b5cec28dSMike Kravetz 		/*
4548b5cec28dSMike Kravetz 		 * region_del() can fail in the rare case where a region
4549b5cec28dSMike Kravetz 		 * must be split and another region descriptor cannot be
4550b5cec28dSMike Kravetz 		 * allocated.  If end == LONG_MAX, it will not fail.
4551b5cec28dSMike Kravetz 		 */
4552b5cec28dSMike Kravetz 		if (chg < 0)
4553b5cec28dSMike Kravetz 			return chg;
4554b5cec28dSMike Kravetz 	}
4555b5cec28dSMike Kravetz 
455645c682a6SKen Chen 	spin_lock(&inode->i_lock);
4557e4c6f8beSEric Sandeen 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
455845c682a6SKen Chen 	spin_unlock(&inode->i_lock);
455945c682a6SKen Chen 
45601c5ecae3SMike Kravetz 	/*
45611c5ecae3SMike Kravetz 	 * If the subpool has a minimum size, the number of global
45621c5ecae3SMike Kravetz 	 * reservations to be released may be adjusted.
45631c5ecae3SMike Kravetz 	 */
45641c5ecae3SMike Kravetz 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
45651c5ecae3SMike Kravetz 	hugetlb_acct_memory(h, -gbl_reserve);
4566b5cec28dSMike Kravetz 
4567b5cec28dSMike Kravetz 	return 0;
4568a43a8c39SChen, Kenneth W }
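
/*
 * Example (hypothetical caller): truncating a hugetlbfs file to zero after
 * freeing 'freed' pages releases every region; passing end == LONG_MAX
 * guarantees region_del() cannot fail, per the comment above:
 *
 *	hugetlb_unreserve_pages(inode, 0, LONG_MAX, freed);
 */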
456993f70f90SNaoya Horiguchi 
45703212b535SSteve Capper #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
45713212b535SSteve Capper static unsigned long page_table_shareable(struct vm_area_struct *svma,
45723212b535SSteve Capper 				struct vm_area_struct *vma,
45733212b535SSteve Capper 				unsigned long addr, pgoff_t idx)
45743212b535SSteve Capper {
45753212b535SSteve Capper 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
45763212b535SSteve Capper 				svma->vm_start;
45773212b535SSteve Capper 	unsigned long sbase = saddr & PUD_MASK;
45783212b535SSteve Capper 	unsigned long s_end = sbase + PUD_SIZE;
45793212b535SSteve Capper 
45803212b535SSteve Capper 	/* Allow segments to share if only one is marked locked */
4581de60f5f1SEric B Munson 	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4582de60f5f1SEric B Munson 	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
45833212b535SSteve Capper 
45843212b535SSteve Capper 	/*
45853212b535SSteve Capper 	 * match the virtual addresses, permissions and the alignment of the
45863212b535SSteve Capper 	 * page table page.
45873212b535SSteve Capper 	 */
45883212b535SSteve Capper 	if (pmd_index(addr) != pmd_index(saddr) ||
45893212b535SSteve Capper 	    vm_flags != svm_flags ||
45903212b535SSteve Capper 	    sbase < svma->vm_start || svma->vm_end < s_end)
45913212b535SSteve Capper 		return 0;
45923212b535SSteve Capper 
45933212b535SSteve Capper 	return saddr;
45943212b535SSteve Capper }
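
/*
 * Worked example: two MAP_SHARED mappings of the same hugetlbfs file at
 * PUD_SIZE-aligned addresses with matching offsets and compatible vm_flags
 * compute the same saddr here and may share one PMD page.  On x86-64
 * (PUD_SIZE == 1GB) that single page of pmds maps 512 contiguous 2MB
 * huge pages.
 */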
45953212b535SSteve Capper 
459631aafb45SNicholas Krause static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
45973212b535SSteve Capper {
45983212b535SSteve Capper 	unsigned long base = addr & PUD_MASK;
45993212b535SSteve Capper 	unsigned long end = base + PUD_SIZE;
46003212b535SSteve Capper 
46013212b535SSteve Capper 	/*
46023212b535SSteve Capper 	 * check for proper vm_flags and page table alignment
46033212b535SSteve Capper 	 */
4604017b1660SMike Kravetz 	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
460531aafb45SNicholas Krause 		return true;
460631aafb45SNicholas Krause 	return false;
46073212b535SSteve Capper }
46083212b535SSteve Capper 
46093212b535SSteve Capper /*
4610017b1660SMike Kravetz  * Determine if the start,end range within vma could be mapped by a shared pmd.
4611017b1660SMike Kravetz  * If yes, adjust start and end to cover range associated with possible
4612017b1660SMike Kravetz  * shared pmd mappings.
4613017b1660SMike Kravetz  */
4614017b1660SMike Kravetz void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4615017b1660SMike Kravetz 				unsigned long *start, unsigned long *end)
4616017b1660SMike Kravetz {
4617017b1660SMike Kravetz 	unsigned long check_addr = *start;
4618017b1660SMike Kravetz 
4619017b1660SMike Kravetz 	if (!(vma->vm_flags & VM_MAYSHARE))
4620017b1660SMike Kravetz 		return;
4621017b1660SMike Kravetz 
4622017b1660SMike Kravetz 	for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
4623017b1660SMike Kravetz 		unsigned long a_start = check_addr & PUD_MASK;
4624017b1660SMike Kravetz 		unsigned long a_end = a_start + PUD_SIZE;
4625017b1660SMike Kravetz 
4626017b1660SMike Kravetz 		/*
4627017b1660SMike Kravetz 		 * If sharing is possible, adjust start/end if necessary.
4628017b1660SMike Kravetz 		 */
4629017b1660SMike Kravetz 		if (range_in_vma(vma, a_start, a_end)) {
4630017b1660SMike Kravetz 			if (a_start < *start)
4631017b1660SMike Kravetz 				*start = a_start;
4632017b1660SMike Kravetz 			if (a_end > *end)
4633017b1660SMike Kravetz 				*end = a_end;
4634017b1660SMike Kravetz 		}
4635017b1660SMike Kravetz 	}
4636017b1660SMike Kravetz }
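
/*
 * Example: with PUD_SIZE == 1GB, a flush of [1G + 4M, 1G + 8M) inside a
 * sharing-eligible VMA spanning [1G, 2G) is widened here to [1G, 2G),
 * because unsharing a PMD page tears down the whole PUD entry.
 */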
4637017b1660SMike Kravetz 
4638017b1660SMike Kravetz /*
46393212b535SSteve Capper  * Search for a shareable pmd page for hugetlb. In any case it calls pmd_alloc()
46403212b535SSteve Capper  * and returns the corresponding pte. While this is not necessary for the
46413212b535SSteve Capper  * !shared pmd case because we can allocate the pmd later as well, it makes the
46423212b535SSteve Capper  * code much cleaner. pmd allocation is essential for the shared case because
4643c8c06efaSDavidlohr Bueso  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
46443212b535SSteve Capper  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
46453212b535SSteve Capper  * bad pmd for sharing.
46463212b535SSteve Capper  */
46473212b535SSteve Capper pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
46483212b535SSteve Capper {
46493212b535SSteve Capper 	struct vm_area_struct *vma = find_vma(mm, addr);
46503212b535SSteve Capper 	struct address_space *mapping = vma->vm_file->f_mapping;
46513212b535SSteve Capper 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
46523212b535SSteve Capper 			vma->vm_pgoff;
46533212b535SSteve Capper 	struct vm_area_struct *svma;
46543212b535SSteve Capper 	unsigned long saddr;
46553212b535SSteve Capper 	pte_t *spte = NULL;
46563212b535SSteve Capper 	pte_t *pte;
4657cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
46583212b535SSteve Capper 
46593212b535SSteve Capper 	if (!vma_shareable(vma, addr))
46603212b535SSteve Capper 		return (pte_t *)pmd_alloc(mm, pud, addr);
46613212b535SSteve Capper 
466283cde9e8SDavidlohr Bueso 	i_mmap_lock_write(mapping);
46633212b535SSteve Capper 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
46643212b535SSteve Capper 		if (svma == vma)
46653212b535SSteve Capper 			continue;
46663212b535SSteve Capper 
46673212b535SSteve Capper 		saddr = page_table_shareable(svma, vma, addr, idx);
46683212b535SSteve Capper 		if (saddr) {
46697868a208SPunit Agrawal 			spte = huge_pte_offset(svma->vm_mm, saddr,
46707868a208SPunit Agrawal 					       vma_mmu_pagesize(svma));
46713212b535SSteve Capper 			if (spte) {
46723212b535SSteve Capper 				get_page(virt_to_page(spte));
46733212b535SSteve Capper 				break;
46743212b535SSteve Capper 			}
46753212b535SSteve Capper 		}
46763212b535SSteve Capper 	}
46773212b535SSteve Capper 
46783212b535SSteve Capper 	if (!spte)
46793212b535SSteve Capper 		goto out;
46803212b535SSteve Capper 
46818bea8052SAneesh Kumar K.V 	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
4682dc6c9a35SKirill A. Shutemov 	if (pud_none(*pud)) {
46833212b535SSteve Capper 		pud_populate(mm, pud,
46843212b535SSteve Capper 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
4685c17b1f42SKirill A. Shutemov 		mm_inc_nr_pmds(mm);
4686dc6c9a35SKirill A. Shutemov 	} else {
46873212b535SSteve Capper 		put_page(virt_to_page(spte));
4688dc6c9a35SKirill A. Shutemov 	}
4689cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
46903212b535SSteve Capper out:
46913212b535SSteve Capper 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
469283cde9e8SDavidlohr Bueso 	i_mmap_unlock_write(mapping);
46933212b535SSteve Capper 	return pte;
46943212b535SSteve Capper }
46953212b535SSteve Capper 
46963212b535SSteve Capper /*
46973212b535SSteve Capper  * unmap huge page backed by shared pte.
46983212b535SSteve Capper  *
46993212b535SSteve Capper  * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
47003212b535SSteve Capper  * shared, indicated by page_count > 1, unmap is achieved by clearing the pud
47013212b535SSteve Capper  * and decrementing the ref count. If count == 1, the pte page is not shared.
47023212b535SSteve Capper  *
4703cb900f41SKirill A. Shutemov  * Called with the page table lock held.
47043212b535SSteve Capper  *
47053212b535SSteve Capper  * returns: 1 successfully unmapped a shared pte page
47063212b535SSteve Capper  *	    0 the underlying pte page is not shared, or it is the last user
47073212b535SSteve Capper  */
47083212b535SSteve Capper int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
47093212b535SSteve Capper {
47103212b535SSteve Capper 	pgd_t *pgd = pgd_offset(mm, *addr);
4711c2febafcSKirill A. Shutemov 	p4d_t *p4d = p4d_offset(pgd, *addr);
4712c2febafcSKirill A. Shutemov 	pud_t *pud = pud_offset(p4d, *addr);
47133212b535SSteve Capper 
47143212b535SSteve Capper 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
47153212b535SSteve Capper 	if (page_count(virt_to_page(ptep)) == 1)
47163212b535SSteve Capper 		return 0;
47173212b535SSteve Capper 
47183212b535SSteve Capper 	pud_clear(pud);
47193212b535SSteve Capper 	put_page(virt_to_page(ptep));
4720dc6c9a35SKirill A. Shutemov 	mm_dec_nr_pmds(mm);
47213212b535SSteve Capper 	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
47223212b535SSteve Capper 	return 1;
47233212b535SSteve Capper }
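
/*
 * Example of the *addr rewind above: on x86-64 (HPAGE_SIZE == 2MB,
 * PTRS_PER_PTE == 512), unsharing at *addr == 1G + 4M sets *addr to
 * 2G - 2M, so a caller that advances by huge_page_size() continues at
 * 2G, the first address of the next PUD range.
 */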
47249e5fc74cSSteve Capper #define want_pmd_share()	(1)
47259e5fc74cSSteve Capper #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
47269e5fc74cSSteve Capper pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
47279e5fc74cSSteve Capper {
47289e5fc74cSSteve Capper 	return NULL;
47299e5fc74cSSteve Capper }
4730e81f2d22SZhang Zhen 
4731e81f2d22SZhang Zhen int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4732e81f2d22SZhang Zhen {
4733e81f2d22SZhang Zhen 	return 0;
4734e81f2d22SZhang Zhen }
4735017b1660SMike Kravetz 
4736017b1660SMike Kravetz void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4737017b1660SMike Kravetz 				unsigned long *start, unsigned long *end)
4738017b1660SMike Kravetz {
4739017b1660SMike Kravetz }
47409e5fc74cSSteve Capper #define want_pmd_share()	(0)
47413212b535SSteve Capper #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
47423212b535SSteve Capper 
47439e5fc74cSSteve Capper #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
47449e5fc74cSSteve Capper pte_t *huge_pte_alloc(struct mm_struct *mm,
47459e5fc74cSSteve Capper 			unsigned long addr, unsigned long sz)
47469e5fc74cSSteve Capper {
47479e5fc74cSSteve Capper 	pgd_t *pgd;
4748c2febafcSKirill A. Shutemov 	p4d_t *p4d;
47499e5fc74cSSteve Capper 	pud_t *pud;
47509e5fc74cSSteve Capper 	pte_t *pte = NULL;
47519e5fc74cSSteve Capper 
47529e5fc74cSSteve Capper 	pgd = pgd_offset(mm, addr);
4753f4f0a3d8SKirill A. Shutemov 	p4d = p4d_alloc(mm, pgd, addr);
4754f4f0a3d8SKirill A. Shutemov 	if (!p4d)
4755f4f0a3d8SKirill A. Shutemov 		return NULL;
4756c2febafcSKirill A. Shutemov 	pud = pud_alloc(mm, p4d, addr);
47579e5fc74cSSteve Capper 	if (pud) {
47589e5fc74cSSteve Capper 		if (sz == PUD_SIZE) {
47599e5fc74cSSteve Capper 			pte = (pte_t *)pud;
47609e5fc74cSSteve Capper 		} else {
47619e5fc74cSSteve Capper 			BUG_ON(sz != PMD_SIZE);
47629e5fc74cSSteve Capper 			if (want_pmd_share() && pud_none(*pud))
47639e5fc74cSSteve Capper 				pte = huge_pmd_share(mm, addr, pud);
47649e5fc74cSSteve Capper 			else
47659e5fc74cSSteve Capper 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
47669e5fc74cSSteve Capper 		}
47679e5fc74cSSteve Capper 	}
47684e666314SMichal Hocko 	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
47699e5fc74cSSteve Capper 
47709e5fc74cSSteve Capper 	return pte;
47719e5fc74cSSteve Capper }
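
/*
 * Example of the size handling above: on x86-64 a 1GB hstate passes
 * sz == PUD_SIZE and gets the pud slot itself back as the "pte", while
 * a 2MB hstate passes sz == PMD_SIZE and, when pud_none() holds and
 * sharing is compiled in, may receive a PMD page shared with another
 * mapping via huge_pmd_share().
 */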
47729e5fc74cSSteve Capper 
47739b19df29SPunit Agrawal /*
47749b19df29SPunit Agrawal  * huge_pte_offset() - Walk the page table to resolve the hugepage
47759b19df29SPunit Agrawal  * entry at address @addr
47769b19df29SPunit Agrawal  *
47779b19df29SPunit Agrawal  * Return: Pointer to page table or swap entry (PUD or PMD) for
47789b19df29SPunit Agrawal  * address @addr, or NULL if a p*d_none() entry is encountered and the
47799b19df29SPunit Agrawal  * size @sz doesn't match the hugepage size at this level of the page
47809b19df29SPunit Agrawal  * table.
47819b19df29SPunit Agrawal  */
47827868a208SPunit Agrawal pte_t *huge_pte_offset(struct mm_struct *mm,
47837868a208SPunit Agrawal 		       unsigned long addr, unsigned long sz)
47849e5fc74cSSteve Capper {
47859e5fc74cSSteve Capper 	pgd_t *pgd;
4786c2febafcSKirill A. Shutemov 	p4d_t *p4d;
47879e5fc74cSSteve Capper 	pud_t *pud;
4788c2febafcSKirill A. Shutemov 	pmd_t *pmd;
47899e5fc74cSSteve Capper 
47909e5fc74cSSteve Capper 	pgd = pgd_offset(mm, addr);
4791c2febafcSKirill A. Shutemov 	if (!pgd_present(*pgd))
4792c2febafcSKirill A. Shutemov 		return NULL;
4793c2febafcSKirill A. Shutemov 	p4d = p4d_offset(pgd, addr);
4794c2febafcSKirill A. Shutemov 	if (!p4d_present(*p4d))
4795c2febafcSKirill A. Shutemov 		return NULL;
47969b19df29SPunit Agrawal 
4797c2febafcSKirill A. Shutemov 	pud = pud_offset(p4d, addr);
47989b19df29SPunit Agrawal 	if (sz != PUD_SIZE && pud_none(*pud))
4799c2febafcSKirill A. Shutemov 		return NULL;
48009b19df29SPunit Agrawal 	/* hugepage or swap? */
48019b19df29SPunit Agrawal 	if (pud_huge(*pud) || !pud_present(*pud))
48029e5fc74cSSteve Capper 		return (pte_t *)pud;
48039b19df29SPunit Agrawal 
48049e5fc74cSSteve Capper 	pmd = pmd_offset(pud, addr);
48059b19df29SPunit Agrawal 	if (sz != PMD_SIZE && pmd_none(*pmd))
48069b19df29SPunit Agrawal 		return NULL;
48079b19df29SPunit Agrawal 	/* hugepage or swap? */
48089b19df29SPunit Agrawal 	if (pmd_huge(*pmd) || !pmd_present(*pmd))
48099e5fc74cSSteve Capper 		return (pte_t *)pmd;
48109b19df29SPunit Agrawal 
48119b19df29SPunit Agrawal 	return NULL;
48129e5fc74cSSteve Capper }
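
/*
 * Typical lookup pattern, as used by the walkers in this file (h is
 * assumed to be the hstate of the mapping):
 *
 *	ptep = huge_pte_offset(mm, addr & huge_page_mask(h),
 *			       huge_page_size(h));
 *	if (ptep)
 *		... may point at a present mapping or a swap entry ...
 *
 * A NULL return means no entry of the expected size exists at addr.
 */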
48139e5fc74cSSteve Capper 
481461f77edaSNaoya Horiguchi #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
481561f77edaSNaoya Horiguchi 
481661f77edaSNaoya Horiguchi /*
481761f77edaSNaoya Horiguchi  * These functions are overridable if your architecture needs its own
481861f77edaSNaoya Horiguchi  * behavior.
481961f77edaSNaoya Horiguchi  */
482061f77edaSNaoya Horiguchi struct page * __weak
482161f77edaSNaoya Horiguchi follow_huge_addr(struct mm_struct *mm, unsigned long address,
482261f77edaSNaoya Horiguchi 			      int write)
482361f77edaSNaoya Horiguchi {
482461f77edaSNaoya Horiguchi 	return ERR_PTR(-EINVAL);
482561f77edaSNaoya Horiguchi }
482661f77edaSNaoya Horiguchi 
482761f77edaSNaoya Horiguchi struct page * __weak
48284dc71451SAneesh Kumar K.V follow_huge_pd(struct vm_area_struct *vma,
48294dc71451SAneesh Kumar K.V 	       unsigned long address, hugepd_t hpd, int flags, int pdshift)
48304dc71451SAneesh Kumar K.V {
48314dc71451SAneesh Kumar K.V 	WARN(1, "hugepd follow called with no support for hugepage directory format\n");
48324dc71451SAneesh Kumar K.V 	return NULL;
48334dc71451SAneesh Kumar K.V }
48344dc71451SAneesh Kumar K.V 
48354dc71451SAneesh Kumar K.V struct page * __weak
48369e5fc74cSSteve Capper follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4837e66f17ffSNaoya Horiguchi 		pmd_t *pmd, int flags)
48389e5fc74cSSteve Capper {
4839e66f17ffSNaoya Horiguchi 	struct page *page = NULL;
4840e66f17ffSNaoya Horiguchi 	spinlock_t *ptl;
4841c9d398faSNaoya Horiguchi 	pte_t pte;
4842e66f17ffSNaoya Horiguchi retry:
4843e66f17ffSNaoya Horiguchi 	ptl = pmd_lockptr(mm, pmd);
4844e66f17ffSNaoya Horiguchi 	spin_lock(ptl);
4845e66f17ffSNaoya Horiguchi 	/*
4846e66f17ffSNaoya Horiguchi 	 * make sure that the address range covered by this pmd is not
4847e66f17ffSNaoya Horiguchi 	 * unmapped by other threads.
4848e66f17ffSNaoya Horiguchi 	 */
4849e66f17ffSNaoya Horiguchi 	if (!pmd_huge(*pmd))
4850e66f17ffSNaoya Horiguchi 		goto out;
4851c9d398faSNaoya Horiguchi 	pte = huge_ptep_get((pte_t *)pmd);
4852c9d398faSNaoya Horiguchi 	if (pte_present(pte)) {
485397534127SGerald Schaefer 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4854e66f17ffSNaoya Horiguchi 		if (flags & FOLL_GET)
4855e66f17ffSNaoya Horiguchi 			get_page(page);
4856e66f17ffSNaoya Horiguchi 	} else {
4857c9d398faSNaoya Horiguchi 		if (is_hugetlb_entry_migration(pte)) {
4858e66f17ffSNaoya Horiguchi 			spin_unlock(ptl);
4859e66f17ffSNaoya Horiguchi 			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
4860e66f17ffSNaoya Horiguchi 			goto retry;
4861e66f17ffSNaoya Horiguchi 		}
4862e66f17ffSNaoya Horiguchi 		/*
4863e66f17ffSNaoya Horiguchi 		 * hwpoisoned entry is treated as no_page_table in
4864e66f17ffSNaoya Horiguchi 		 * follow_page_mask().
4865e66f17ffSNaoya Horiguchi 		 */
4866e66f17ffSNaoya Horiguchi 	}
4867e66f17ffSNaoya Horiguchi out:
4868e66f17ffSNaoya Horiguchi 	spin_unlock(ptl);
48699e5fc74cSSteve Capper 	return page;
48709e5fc74cSSteve Capper }
48719e5fc74cSSteve Capper 
487261f77edaSNaoya Horiguchi struct page * __weak
48739e5fc74cSSteve Capper follow_huge_pud(struct mm_struct *mm, unsigned long address,
4874e66f17ffSNaoya Horiguchi 		pud_t *pud, int flags)
48759e5fc74cSSteve Capper {
4876e66f17ffSNaoya Horiguchi 	if (flags & FOLL_GET)
4877e66f17ffSNaoya Horiguchi 		return NULL;
48789e5fc74cSSteve Capper 
4879e66f17ffSNaoya Horiguchi 	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
48809e5fc74cSSteve Capper }
48819e5fc74cSSteve Capper 
4882faaa5b62SAnshuman Khandual struct page * __weak
4883faaa5b62SAnshuman Khandual follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
4884faaa5b62SAnshuman Khandual {
4885faaa5b62SAnshuman Khandual 	if (flags & FOLL_GET)
4886faaa5b62SAnshuman Khandual 		return NULL;
4887faaa5b62SAnshuman Khandual 
4888faaa5b62SAnshuman Khandual 	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
4889faaa5b62SAnshuman Khandual }
4890faaa5b62SAnshuman Khandual 
489131caf665SNaoya Horiguchi bool isolate_huge_page(struct page *page, struct list_head *list)
489231caf665SNaoya Horiguchi {
4893bcc54222SNaoya Horiguchi 	bool ret = true;
4894bcc54222SNaoya Horiguchi 
4895309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageHead(page), page);
489631caf665SNaoya Horiguchi 	spin_lock(&hugetlb_lock);
4897bcc54222SNaoya Horiguchi 	if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4898bcc54222SNaoya Horiguchi 		ret = false;
4899bcc54222SNaoya Horiguchi 		goto unlock;
4900bcc54222SNaoya Horiguchi 	}
4901bcc54222SNaoya Horiguchi 	clear_page_huge_active(page);
490231caf665SNaoya Horiguchi 	list_move_tail(&page->lru, list);
4903bcc54222SNaoya Horiguchi unlock:
490431caf665SNaoya Horiguchi 	spin_unlock(&hugetlb_lock);
4905bcc54222SNaoya Horiguchi 	return ret;
490631caf665SNaoya Horiguchi }
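
/*
 * A sketch of the migration-side usage (hypothetical caller; the real
 * users are the page-migration and memory-failure paths):
 *
 *	LIST_HEAD(pagelist);
 *
 *	if (isolate_huge_page(page, &pagelist))
 *		... migrate the list; on failure putback_active_hugepage()
 *		    below restores the page to its hstate's active list ...
 */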
490731caf665SNaoya Horiguchi 
490831caf665SNaoya Horiguchi void putback_active_hugepage(struct page *page)
490931caf665SNaoya Horiguchi {
4910309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageHead(page), page);
491131caf665SNaoya Horiguchi 	spin_lock(&hugetlb_lock);
4912bcc54222SNaoya Horiguchi 	set_page_huge_active(page);
491331caf665SNaoya Horiguchi 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
491431caf665SNaoya Horiguchi 	spin_unlock(&hugetlb_lock);
491531caf665SNaoya Horiguchi 	put_page(page);
491631caf665SNaoya Horiguchi }
4917ab5ac90aSMichal Hocko 
4918ab5ac90aSMichal Hocko void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
4919ab5ac90aSMichal Hocko {
4920ab5ac90aSMichal Hocko 	struct hstate *h = page_hstate(oldpage);
4921ab5ac90aSMichal Hocko 
4922ab5ac90aSMichal Hocko 	hugetlb_cgroup_migrate(oldpage, newpage);
4923ab5ac90aSMichal Hocko 	set_page_owner_migrate_reason(newpage, reason);
4924ab5ac90aSMichal Hocko 
4925ab5ac90aSMichal Hocko 	/*
4926ab5ac90aSMichal Hocko 	 * Transfer the temporary state of the new huge page. This is
4927ab5ac90aSMichal Hocko 	 * the reverse of other transitions because the newpage is going
4928ab5ac90aSMichal Hocko 	 * to be final while the old one will be freed, so it takes over
4929ab5ac90aSMichal Hocko 	 * the temporary status.
4930ab5ac90aSMichal Hocko 	 *
4931ab5ac90aSMichal Hocko 	 * Also note that we have to transfer the per-node surplus state
4932ab5ac90aSMichal Hocko 	 * here as well, otherwise the global surplus count will not match
4933ab5ac90aSMichal Hocko 	 * the per-node counts.
4934ab5ac90aSMichal Hocko 	 */
4935ab5ac90aSMichal Hocko 	if (PageHugeTemporary(newpage)) {
4936ab5ac90aSMichal Hocko 		int old_nid = page_to_nid(oldpage);
4937ab5ac90aSMichal Hocko 		int new_nid = page_to_nid(newpage);
4938ab5ac90aSMichal Hocko 
4939ab5ac90aSMichal Hocko 		SetPageHugeTemporary(oldpage);
4940ab5ac90aSMichal Hocko 		ClearPageHugeTemporary(newpage);
4941ab5ac90aSMichal Hocko 
4942ab5ac90aSMichal Hocko 		spin_lock(&hugetlb_lock);
4943ab5ac90aSMichal Hocko 		if (h->surplus_huge_pages_node[old_nid]) {
4944ab5ac90aSMichal Hocko 			h->surplus_huge_pages_node[old_nid]--;
4945ab5ac90aSMichal Hocko 			h->surplus_huge_pages_node[new_nid]++;
4946ab5ac90aSMichal Hocko 		}
4947ab5ac90aSMichal Hocko 		spin_unlock(&hugetlb_lock);
4948ab5ac90aSMichal Hocko 	}
4949ab5ac90aSMichal Hocko }
4950