xref: /openbmc/linux/mm/hugetlb.c (revision e5bbc8a6)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Generic hugetlb support.
36d49e352SNadia Yvette Chambers  * (C) Nadia Yvette Chambers, April 2004
41da177e4SLinus Torvalds  */
51da177e4SLinus Torvalds #include <linux/list.h>
61da177e4SLinus Torvalds #include <linux/init.h>
71da177e4SLinus Torvalds #include <linux/mm.h>
8e1759c21SAlexey Dobriyan #include <linux/seq_file.h>
91da177e4SLinus Torvalds #include <linux/sysctl.h>
101da177e4SLinus Torvalds #include <linux/highmem.h>
11cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h>
121da177e4SLinus Torvalds #include <linux/nodemask.h>
1363551ae0SDavid Gibson #include <linux/pagemap.h>
145da7ca86SChristoph Lameter #include <linux/mempolicy.h>
153b32123dSGideon Israel Dsouza #include <linux/compiler.h>
16aea47ff3SChristoph Lameter #include <linux/cpuset.h>
173935baa9SDavid Gibson #include <linux/mutex.h>
18aa888a74SAndi Kleen #include <linux/bootmem.h>
19a3437870SNishanth Aravamudan #include <linux/sysfs.h>
205a0e3ad6STejun Heo #include <linux/slab.h>
210fe6e20bSNaoya Horiguchi #include <linux/rmap.h>
22fd6a03edSNaoya Horiguchi #include <linux/swap.h>
23fd6a03edSNaoya Horiguchi #include <linux/swapops.h>
24c8721bbbSNaoya Horiguchi #include <linux/page-isolation.h>
258382d914SDavidlohr Bueso #include <linux/jhash.h>
26d6606683SLinus Torvalds 
2763551ae0SDavid Gibson #include <asm/page.h>
2863551ae0SDavid Gibson #include <asm/pgtable.h>
2924669e58SAneesh Kumar K.V #include <asm/tlb.h>
3063551ae0SDavid Gibson 
3124669e58SAneesh Kumar K.V #include <linux/io.h>
3263551ae0SDavid Gibson #include <linux/hugetlb.h>
339dd540e2SAneesh Kumar K.V #include <linux/hugetlb_cgroup.h>
349a305230SLee Schermerhorn #include <linux/node.h>
357835e98bSNick Piggin #include "internal.h"
361da177e4SLinus Torvalds 
37753162cdSAndrey Ryabinin int hugepages_treat_as_movable;
38a5516438SAndi Kleen 
39c3f38a38SAneesh Kumar K.V int hugetlb_max_hstate __read_mostly;
40e5ff2159SAndi Kleen unsigned int default_hstate_idx;
41e5ff2159SAndi Kleen struct hstate hstates[HUGE_MAX_HSTATE];
42641844f5SNaoya Horiguchi /*
43641844f5SNaoya Horiguchi  * Minimum page order among possible hugepage sizes, set to a proper value
44641844f5SNaoya Horiguchi  * at boot time.
45641844f5SNaoya Horiguchi  */
46641844f5SNaoya Horiguchi static unsigned int minimum_order __read_mostly = UINT_MAX;
47e5ff2159SAndi Kleen 
4853ba51d2SJon Tollefson __initdata LIST_HEAD(huge_boot_pages);
4953ba51d2SJon Tollefson 
50e5ff2159SAndi Kleen /* for command line parsing */
51e5ff2159SAndi Kleen static struct hstate * __initdata parsed_hstate;
52e5ff2159SAndi Kleen static unsigned long __initdata default_hstate_max_huge_pages;
53e11bfbfcSNick Piggin static unsigned long __initdata default_hstate_size;
549fee021dSVaishali Thakkar static bool __initdata parsed_valid_hugepagesz = true;
55e5ff2159SAndi Kleen 
563935baa9SDavid Gibson /*
5731caf665SNaoya Horiguchi  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
5831caf665SNaoya Horiguchi  * free_huge_pages, and surplus_huge_pages.
593935baa9SDavid Gibson  */
60c3f38a38SAneesh Kumar K.V DEFINE_SPINLOCK(hugetlb_lock);
610bd0f9fbSEric Paris 
628382d914SDavidlohr Bueso /*
638382d914SDavidlohr Bueso  * Serializes faults on the same logical page.  This is used to
648382d914SDavidlohr Bueso  * prevent spurious OOMs when the hugepage pool is fully utilized.
658382d914SDavidlohr Bueso  */
668382d914SDavidlohr Bueso static int num_fault_mutexes;
67c672c7f2SMike Kravetz struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
688382d914SDavidlohr Bueso 
697ca02d0aSMike Kravetz /* Forward declaration */
707ca02d0aSMike Kravetz static int hugetlb_acct_memory(struct hstate *h, long delta);
717ca02d0aSMike Kravetz 
7290481622SDavid Gibson static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
7390481622SDavid Gibson {
7490481622SDavid Gibson 	bool free = (spool->count == 0) && (spool->used_hpages == 0);
7590481622SDavid Gibson 
7690481622SDavid Gibson 	spin_unlock(&spool->lock);
7790481622SDavid Gibson 
7890481622SDavid Gibson 	/* If no pages are used, and no other handles to the subpool
797ca02d0aSMike Kravetz 	 * remain, give up any reservations mased on minimum size and
807ca02d0aSMike Kravetz 	 * free the subpool */
817ca02d0aSMike Kravetz 	if (free) {
827ca02d0aSMike Kravetz 		if (spool->min_hpages != -1)
837ca02d0aSMike Kravetz 			hugetlb_acct_memory(spool->hstate,
847ca02d0aSMike Kravetz 						-spool->min_hpages);
8590481622SDavid Gibson 		kfree(spool);
8690481622SDavid Gibson 	}
877ca02d0aSMike Kravetz }
8890481622SDavid Gibson 
897ca02d0aSMike Kravetz struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
907ca02d0aSMike Kravetz 						long min_hpages)
9190481622SDavid Gibson {
9290481622SDavid Gibson 	struct hugepage_subpool *spool;
9390481622SDavid Gibson 
94c6a91820SMike Kravetz 	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
9590481622SDavid Gibson 	if (!spool)
9690481622SDavid Gibson 		return NULL;
9790481622SDavid Gibson 
9890481622SDavid Gibson 	spin_lock_init(&spool->lock);
9990481622SDavid Gibson 	spool->count = 1;
1007ca02d0aSMike Kravetz 	spool->max_hpages = max_hpages;
1017ca02d0aSMike Kravetz 	spool->hstate = h;
1027ca02d0aSMike Kravetz 	spool->min_hpages = min_hpages;
1037ca02d0aSMike Kravetz 
1047ca02d0aSMike Kravetz 	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
1057ca02d0aSMike Kravetz 		kfree(spool);
1067ca02d0aSMike Kravetz 		return NULL;
1077ca02d0aSMike Kravetz 	}
1087ca02d0aSMike Kravetz 	spool->rsv_hpages = min_hpages;
10990481622SDavid Gibson 
11090481622SDavid Gibson 	return spool;
11190481622SDavid Gibson }
11290481622SDavid Gibson 
11390481622SDavid Gibson void hugepage_put_subpool(struct hugepage_subpool *spool)
11490481622SDavid Gibson {
11590481622SDavid Gibson 	spin_lock(&spool->lock);
11690481622SDavid Gibson 	BUG_ON(!spool->count);
11790481622SDavid Gibson 	spool->count--;
11890481622SDavid Gibson 	unlock_or_release_subpool(spool);
11990481622SDavid Gibson }
12090481622SDavid Gibson 
1211c5ecae3SMike Kravetz /*
1221c5ecae3SMike Kravetz  * Subpool accounting for allocating and reserving pages.
1231c5ecae3SMike Kravetz  * Return -ENOMEM if there are not enough resources to satisfy the
1241c5ecae3SMike Kravetz  * the request.  Otherwise, return the number of pages by which the
1251c5ecae3SMike Kravetz  * global pools must be adjusted (upward).  The returned value may
1261c5ecae3SMike Kravetz  * only be different than the passed value (delta) in the case where
1271c5ecae3SMike Kravetz  * a subpool minimum size must be manitained.
1281c5ecae3SMike Kravetz  */
1291c5ecae3SMike Kravetz static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
13090481622SDavid Gibson 				      long delta)
13190481622SDavid Gibson {
1321c5ecae3SMike Kravetz 	long ret = delta;
13390481622SDavid Gibson 
13490481622SDavid Gibson 	if (!spool)
1351c5ecae3SMike Kravetz 		return ret;
13690481622SDavid Gibson 
13790481622SDavid Gibson 	spin_lock(&spool->lock);
13890481622SDavid Gibson 
1391c5ecae3SMike Kravetz 	if (spool->max_hpages != -1) {		/* maximum size accounting */
1401c5ecae3SMike Kravetz 		if ((spool->used_hpages + delta) <= spool->max_hpages)
1411c5ecae3SMike Kravetz 			spool->used_hpages += delta;
1421c5ecae3SMike Kravetz 		else {
1431c5ecae3SMike Kravetz 			ret = -ENOMEM;
1441c5ecae3SMike Kravetz 			goto unlock_ret;
1451c5ecae3SMike Kravetz 		}
1461c5ecae3SMike Kravetz 	}
1471c5ecae3SMike Kravetz 
14809a95e29SMike Kravetz 	/* minimum size accounting */
14909a95e29SMike Kravetz 	if (spool->min_hpages != -1 && spool->rsv_hpages) {
1501c5ecae3SMike Kravetz 		if (delta > spool->rsv_hpages) {
1511c5ecae3SMike Kravetz 			/*
1521c5ecae3SMike Kravetz 			 * Asking for more reserves than those already taken on
1531c5ecae3SMike Kravetz 			 * behalf of subpool.  Return difference.
1541c5ecae3SMike Kravetz 			 */
1551c5ecae3SMike Kravetz 			ret = delta - spool->rsv_hpages;
1561c5ecae3SMike Kravetz 			spool->rsv_hpages = 0;
1571c5ecae3SMike Kravetz 		} else {
1581c5ecae3SMike Kravetz 			ret = 0;	/* reserves already accounted for */
1591c5ecae3SMike Kravetz 			spool->rsv_hpages -= delta;
1601c5ecae3SMike Kravetz 		}
1611c5ecae3SMike Kravetz 	}
1621c5ecae3SMike Kravetz 
1631c5ecae3SMike Kravetz unlock_ret:
1641c5ecae3SMike Kravetz 	spin_unlock(&spool->lock);
16590481622SDavid Gibson 	return ret;
16690481622SDavid Gibson }
16790481622SDavid Gibson 
1681c5ecae3SMike Kravetz /*
1691c5ecae3SMike Kravetz  * Subpool accounting for freeing and unreserving pages.
1701c5ecae3SMike Kravetz  * Return the number of global page reservations that must be dropped.
1711c5ecae3SMike Kravetz  * The return value may only be different than the passed value (delta)
1721c5ecae3SMike Kravetz  * in the case where a subpool minimum size must be maintained.
1731c5ecae3SMike Kravetz  */
1741c5ecae3SMike Kravetz static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
17590481622SDavid Gibson 				       long delta)
17690481622SDavid Gibson {
1771c5ecae3SMike Kravetz 	long ret = delta;
1781c5ecae3SMike Kravetz 
17990481622SDavid Gibson 	if (!spool)
1801c5ecae3SMike Kravetz 		return delta;
18190481622SDavid Gibson 
18290481622SDavid Gibson 	spin_lock(&spool->lock);
1831c5ecae3SMike Kravetz 
1841c5ecae3SMike Kravetz 	if (spool->max_hpages != -1)		/* maximum size accounting */
18590481622SDavid Gibson 		spool->used_hpages -= delta;
1861c5ecae3SMike Kravetz 
18709a95e29SMike Kravetz 	 /* minimum size accounting */
18809a95e29SMike Kravetz 	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
1891c5ecae3SMike Kravetz 		if (spool->rsv_hpages + delta <= spool->min_hpages)
1901c5ecae3SMike Kravetz 			ret = 0;
1911c5ecae3SMike Kravetz 		else
1921c5ecae3SMike Kravetz 			ret = spool->rsv_hpages + delta - spool->min_hpages;
1931c5ecae3SMike Kravetz 
1941c5ecae3SMike Kravetz 		spool->rsv_hpages += delta;
1951c5ecae3SMike Kravetz 		if (spool->rsv_hpages > spool->min_hpages)
1961c5ecae3SMike Kravetz 			spool->rsv_hpages = spool->min_hpages;
1971c5ecae3SMike Kravetz 	}
1981c5ecae3SMike Kravetz 
1991c5ecae3SMike Kravetz 	/*
2001c5ecae3SMike Kravetz 	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
2011c5ecae3SMike Kravetz 	 * quota reference, free it now.
2021c5ecae3SMike Kravetz 	 */
20390481622SDavid Gibson 	unlock_or_release_subpool(spool);
2041c5ecae3SMike Kravetz 
2051c5ecae3SMike Kravetz 	return ret;
20690481622SDavid Gibson }
20790481622SDavid Gibson 
20890481622SDavid Gibson static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
20990481622SDavid Gibson {
21090481622SDavid Gibson 	return HUGETLBFS_SB(inode->i_sb)->spool;
21190481622SDavid Gibson }
21290481622SDavid Gibson 
21390481622SDavid Gibson static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
21490481622SDavid Gibson {
215496ad9aaSAl Viro 	return subpool_inode(file_inode(vma->vm_file));
21690481622SDavid Gibson }
21790481622SDavid Gibson 
218e7c4b0bfSAndy Whitcroft /*
21996822904SAndy Whitcroft  * Region tracking -- allows tracking of reservations and instantiated pages
22096822904SAndy Whitcroft  *                    across the pages in a mapping.
22184afd99bSAndy Whitcroft  *
2221dd308a7SMike Kravetz  * The region data structures are embedded into a resv_map and protected
2231dd308a7SMike Kravetz  * by a resv_map's lock.  The set of regions within the resv_map represent
2241dd308a7SMike Kravetz  * reservations for huge pages, or huge pages that have already been
2251dd308a7SMike Kravetz  * instantiated within the map.  The from and to elements are huge page
2261dd308a7SMike Kravetz  * indicies into the associated mapping.  from indicates the starting index
2271dd308a7SMike Kravetz  * of the region.  to represents the first index past the end of  the region.
2281dd308a7SMike Kravetz  *
2291dd308a7SMike Kravetz  * For example, a file region structure with from == 0 and to == 4 represents
2301dd308a7SMike Kravetz  * four huge pages in a mapping.  It is important to note that the to element
2311dd308a7SMike Kravetz  * represents the first element past the end of the region. This is used in
2321dd308a7SMike Kravetz  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
2331dd308a7SMike Kravetz  *
2341dd308a7SMike Kravetz  * Interval notation of the form [from, to) will be used to indicate that
2351dd308a7SMike Kravetz  * the endpoint from is inclusive and to is exclusive.
23696822904SAndy Whitcroft  */
23796822904SAndy Whitcroft struct file_region {
23896822904SAndy Whitcroft 	struct list_head link;
23996822904SAndy Whitcroft 	long from;
24096822904SAndy Whitcroft 	long to;
24196822904SAndy Whitcroft };
24296822904SAndy Whitcroft 
2431dd308a7SMike Kravetz /*
2441dd308a7SMike Kravetz  * Add the huge page range represented by [f, t) to the reserve
2455e911373SMike Kravetz  * map.  In the normal case, existing regions will be expanded
2465e911373SMike Kravetz  * to accommodate the specified range.  Sufficient regions should
2475e911373SMike Kravetz  * exist for expansion due to the previous call to region_chg
2485e911373SMike Kravetz  * with the same range.  However, it is possible that region_del
2495e911373SMike Kravetz  * could have been called after region_chg and modifed the map
2505e911373SMike Kravetz  * in such a way that no region exists to be expanded.  In this
2515e911373SMike Kravetz  * case, pull a region descriptor from the cache associated with
2525e911373SMike Kravetz  * the map and use that for the new range.
253cf3ad20bSMike Kravetz  *
254cf3ad20bSMike Kravetz  * Return the number of new huge pages added to the map.  This
255cf3ad20bSMike Kravetz  * number is greater than or equal to zero.
2561dd308a7SMike Kravetz  */
2571406ec9bSJoonsoo Kim static long region_add(struct resv_map *resv, long f, long t)
25896822904SAndy Whitcroft {
2591406ec9bSJoonsoo Kim 	struct list_head *head = &resv->regions;
26096822904SAndy Whitcroft 	struct file_region *rg, *nrg, *trg;
261cf3ad20bSMike Kravetz 	long add = 0;
26296822904SAndy Whitcroft 
2637b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
26496822904SAndy Whitcroft 	/* Locate the region we are either in or before. */
26596822904SAndy Whitcroft 	list_for_each_entry(rg, head, link)
26696822904SAndy Whitcroft 		if (f <= rg->to)
26796822904SAndy Whitcroft 			break;
26896822904SAndy Whitcroft 
2695e911373SMike Kravetz 	/*
2705e911373SMike Kravetz 	 * If no region exists which can be expanded to include the
2715e911373SMike Kravetz 	 * specified range, the list must have been modified by an
2725e911373SMike Kravetz 	 * interleving call to region_del().  Pull a region descriptor
2735e911373SMike Kravetz 	 * from the cache and use it for this range.
2745e911373SMike Kravetz 	 */
2755e911373SMike Kravetz 	if (&rg->link == head || t < rg->from) {
2765e911373SMike Kravetz 		VM_BUG_ON(resv->region_cache_count <= 0);
2775e911373SMike Kravetz 
2785e911373SMike Kravetz 		resv->region_cache_count--;
2795e911373SMike Kravetz 		nrg = list_first_entry(&resv->region_cache, struct file_region,
2805e911373SMike Kravetz 					link);
2815e911373SMike Kravetz 		list_del(&nrg->link);
2825e911373SMike Kravetz 
2835e911373SMike Kravetz 		nrg->from = f;
2845e911373SMike Kravetz 		nrg->to = t;
2855e911373SMike Kravetz 		list_add(&nrg->link, rg->link.prev);
2865e911373SMike Kravetz 
2875e911373SMike Kravetz 		add += t - f;
2885e911373SMike Kravetz 		goto out_locked;
2895e911373SMike Kravetz 	}
2905e911373SMike Kravetz 
29196822904SAndy Whitcroft 	/* Round our left edge to the current segment if it encloses us. */
29296822904SAndy Whitcroft 	if (f > rg->from)
29396822904SAndy Whitcroft 		f = rg->from;
29496822904SAndy Whitcroft 
29596822904SAndy Whitcroft 	/* Check for and consume any regions we now overlap with. */
29696822904SAndy Whitcroft 	nrg = rg;
29796822904SAndy Whitcroft 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
29896822904SAndy Whitcroft 		if (&rg->link == head)
29996822904SAndy Whitcroft 			break;
30096822904SAndy Whitcroft 		if (rg->from > t)
30196822904SAndy Whitcroft 			break;
30296822904SAndy Whitcroft 
30396822904SAndy Whitcroft 		/* If this area reaches higher then extend our area to
30496822904SAndy Whitcroft 		 * include it completely.  If this is not the first area
30596822904SAndy Whitcroft 		 * which we intend to reuse, free it. */
30696822904SAndy Whitcroft 		if (rg->to > t)
30796822904SAndy Whitcroft 			t = rg->to;
30896822904SAndy Whitcroft 		if (rg != nrg) {
309cf3ad20bSMike Kravetz 			/* Decrement return value by the deleted range.
310cf3ad20bSMike Kravetz 			 * Another range will span this area so that by
311cf3ad20bSMike Kravetz 			 * end of routine add will be >= zero
312cf3ad20bSMike Kravetz 			 */
313cf3ad20bSMike Kravetz 			add -= (rg->to - rg->from);
31496822904SAndy Whitcroft 			list_del(&rg->link);
31596822904SAndy Whitcroft 			kfree(rg);
31696822904SAndy Whitcroft 		}
31796822904SAndy Whitcroft 	}
318cf3ad20bSMike Kravetz 
319cf3ad20bSMike Kravetz 	add += (nrg->from - f);		/* Added to beginning of region */
32096822904SAndy Whitcroft 	nrg->from = f;
321cf3ad20bSMike Kravetz 	add += t - nrg->to;		/* Added to end of region */
32296822904SAndy Whitcroft 	nrg->to = t;
323cf3ad20bSMike Kravetz 
3245e911373SMike Kravetz out_locked:
3255e911373SMike Kravetz 	resv->adds_in_progress--;
3267b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
327cf3ad20bSMike Kravetz 	VM_BUG_ON(add < 0);
328cf3ad20bSMike Kravetz 	return add;
32996822904SAndy Whitcroft }
33096822904SAndy Whitcroft 
3311dd308a7SMike Kravetz /*
3321dd308a7SMike Kravetz  * Examine the existing reserve map and determine how many
3331dd308a7SMike Kravetz  * huge pages in the specified range [f, t) are NOT currently
3341dd308a7SMike Kravetz  * represented.  This routine is called before a subsequent
3351dd308a7SMike Kravetz  * call to region_add that will actually modify the reserve
3361dd308a7SMike Kravetz  * map to add the specified range [f, t).  region_chg does
3371dd308a7SMike Kravetz  * not change the number of huge pages represented by the
3381dd308a7SMike Kravetz  * map.  However, if the existing regions in the map can not
3391dd308a7SMike Kravetz  * be expanded to represent the new range, a new file_region
3401dd308a7SMike Kravetz  * structure is added to the map as a placeholder.  This is
3411dd308a7SMike Kravetz  * so that the subsequent region_add call will have all the
3421dd308a7SMike Kravetz  * regions it needs and will not fail.
3431dd308a7SMike Kravetz  *
3445e911373SMike Kravetz  * Upon entry, region_chg will also examine the cache of region descriptors
3455e911373SMike Kravetz  * associated with the map.  If there are not enough descriptors cached, one
3465e911373SMike Kravetz  * will be allocated for the in progress add operation.
3475e911373SMike Kravetz  *
3485e911373SMike Kravetz  * Returns the number of huge pages that need to be added to the existing
3495e911373SMike Kravetz  * reservation map for the range [f, t).  This number is greater or equal to
3505e911373SMike Kravetz  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
3515e911373SMike Kravetz  * is needed and can not be allocated.
3521dd308a7SMike Kravetz  */
3531406ec9bSJoonsoo Kim static long region_chg(struct resv_map *resv, long f, long t)
35496822904SAndy Whitcroft {
3551406ec9bSJoonsoo Kim 	struct list_head *head = &resv->regions;
3567b24d861SDavidlohr Bueso 	struct file_region *rg, *nrg = NULL;
35796822904SAndy Whitcroft 	long chg = 0;
35896822904SAndy Whitcroft 
3597b24d861SDavidlohr Bueso retry:
3607b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
3615e911373SMike Kravetz retry_locked:
3625e911373SMike Kravetz 	resv->adds_in_progress++;
3635e911373SMike Kravetz 
3645e911373SMike Kravetz 	/*
3655e911373SMike Kravetz 	 * Check for sufficient descriptors in the cache to accommodate
3665e911373SMike Kravetz 	 * the number of in progress add operations.
3675e911373SMike Kravetz 	 */
3685e911373SMike Kravetz 	if (resv->adds_in_progress > resv->region_cache_count) {
3695e911373SMike Kravetz 		struct file_region *trg;
3705e911373SMike Kravetz 
3715e911373SMike Kravetz 		VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
3725e911373SMike Kravetz 		/* Must drop lock to allocate a new descriptor. */
3735e911373SMike Kravetz 		resv->adds_in_progress--;
3745e911373SMike Kravetz 		spin_unlock(&resv->lock);
3755e911373SMike Kravetz 
3765e911373SMike Kravetz 		trg = kmalloc(sizeof(*trg), GFP_KERNEL);
377dbe409e4SMike Kravetz 		if (!trg) {
378dbe409e4SMike Kravetz 			kfree(nrg);
3795e911373SMike Kravetz 			return -ENOMEM;
380dbe409e4SMike Kravetz 		}
3815e911373SMike Kravetz 
3825e911373SMike Kravetz 		spin_lock(&resv->lock);
3835e911373SMike Kravetz 		list_add(&trg->link, &resv->region_cache);
3845e911373SMike Kravetz 		resv->region_cache_count++;
3855e911373SMike Kravetz 		goto retry_locked;
3865e911373SMike Kravetz 	}
3875e911373SMike Kravetz 
38896822904SAndy Whitcroft 	/* Locate the region we are before or in. */
38996822904SAndy Whitcroft 	list_for_each_entry(rg, head, link)
39096822904SAndy Whitcroft 		if (f <= rg->to)
39196822904SAndy Whitcroft 			break;
39296822904SAndy Whitcroft 
39396822904SAndy Whitcroft 	/* If we are below the current region then a new region is required.
39496822904SAndy Whitcroft 	 * Subtle, allocate a new region at the position but make it zero
39596822904SAndy Whitcroft 	 * size such that we can guarantee to record the reservation. */
39696822904SAndy Whitcroft 	if (&rg->link == head || t < rg->from) {
3977b24d861SDavidlohr Bueso 		if (!nrg) {
3985e911373SMike Kravetz 			resv->adds_in_progress--;
3997b24d861SDavidlohr Bueso 			spin_unlock(&resv->lock);
40096822904SAndy Whitcroft 			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
40196822904SAndy Whitcroft 			if (!nrg)
40296822904SAndy Whitcroft 				return -ENOMEM;
4037b24d861SDavidlohr Bueso 
40496822904SAndy Whitcroft 			nrg->from = f;
40596822904SAndy Whitcroft 			nrg->to   = f;
40696822904SAndy Whitcroft 			INIT_LIST_HEAD(&nrg->link);
4077b24d861SDavidlohr Bueso 			goto retry;
4087b24d861SDavidlohr Bueso 		}
40996822904SAndy Whitcroft 
4107b24d861SDavidlohr Bueso 		list_add(&nrg->link, rg->link.prev);
4117b24d861SDavidlohr Bueso 		chg = t - f;
4127b24d861SDavidlohr Bueso 		goto out_nrg;
41396822904SAndy Whitcroft 	}
41496822904SAndy Whitcroft 
41596822904SAndy Whitcroft 	/* Round our left edge to the current segment if it encloses us. */
41696822904SAndy Whitcroft 	if (f > rg->from)
41796822904SAndy Whitcroft 		f = rg->from;
41896822904SAndy Whitcroft 	chg = t - f;
41996822904SAndy Whitcroft 
42096822904SAndy Whitcroft 	/* Check for and consume any regions we now overlap with. */
42196822904SAndy Whitcroft 	list_for_each_entry(rg, rg->link.prev, link) {
42296822904SAndy Whitcroft 		if (&rg->link == head)
42396822904SAndy Whitcroft 			break;
42496822904SAndy Whitcroft 		if (rg->from > t)
4257b24d861SDavidlohr Bueso 			goto out;
42696822904SAndy Whitcroft 
42725985edcSLucas De Marchi 		/* We overlap with this area, if it extends further than
42896822904SAndy Whitcroft 		 * us then we must extend ourselves.  Account for its
42996822904SAndy Whitcroft 		 * existing reservation. */
43096822904SAndy Whitcroft 		if (rg->to > t) {
43196822904SAndy Whitcroft 			chg += rg->to - t;
43296822904SAndy Whitcroft 			t = rg->to;
43396822904SAndy Whitcroft 		}
43496822904SAndy Whitcroft 		chg -= rg->to - rg->from;
43596822904SAndy Whitcroft 	}
4367b24d861SDavidlohr Bueso 
4377b24d861SDavidlohr Bueso out:
4387b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
4397b24d861SDavidlohr Bueso 	/*  We already know we raced and no longer need the new region */
4407b24d861SDavidlohr Bueso 	kfree(nrg);
4417b24d861SDavidlohr Bueso 	return chg;
4427b24d861SDavidlohr Bueso out_nrg:
4437b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
44496822904SAndy Whitcroft 	return chg;
44596822904SAndy Whitcroft }
44696822904SAndy Whitcroft 
4471dd308a7SMike Kravetz /*
4485e911373SMike Kravetz  * Abort the in progress add operation.  The adds_in_progress field
4495e911373SMike Kravetz  * of the resv_map keeps track of the operations in progress between
4505e911373SMike Kravetz  * calls to region_chg and region_add.  Operations are sometimes
4515e911373SMike Kravetz  * aborted after the call to region_chg.  In such cases, region_abort
4525e911373SMike Kravetz  * is called to decrement the adds_in_progress counter.
4535e911373SMike Kravetz  *
4545e911373SMike Kravetz  * NOTE: The range arguments [f, t) are not needed or used in this
4555e911373SMike Kravetz  * routine.  They are kept to make reading the calling code easier as
4565e911373SMike Kravetz  * arguments will match the associated region_chg call.
4575e911373SMike Kravetz  */
4585e911373SMike Kravetz static void region_abort(struct resv_map *resv, long f, long t)
4595e911373SMike Kravetz {
4605e911373SMike Kravetz 	spin_lock(&resv->lock);
4615e911373SMike Kravetz 	VM_BUG_ON(!resv->region_cache_count);
4625e911373SMike Kravetz 	resv->adds_in_progress--;
4635e911373SMike Kravetz 	spin_unlock(&resv->lock);
4645e911373SMike Kravetz }
4655e911373SMike Kravetz 
4665e911373SMike Kravetz /*
467feba16e2SMike Kravetz  * Delete the specified range [f, t) from the reserve map.  If the
468feba16e2SMike Kravetz  * t parameter is LONG_MAX, this indicates that ALL regions after f
469feba16e2SMike Kravetz  * should be deleted.  Locate the regions which intersect [f, t)
470feba16e2SMike Kravetz  * and either trim, delete or split the existing regions.
471feba16e2SMike Kravetz  *
472feba16e2SMike Kravetz  * Returns the number of huge pages deleted from the reserve map.
473feba16e2SMike Kravetz  * In the normal case, the return value is zero or more.  In the
474feba16e2SMike Kravetz  * case where a region must be split, a new region descriptor must
475feba16e2SMike Kravetz  * be allocated.  If the allocation fails, -ENOMEM will be returned.
476feba16e2SMike Kravetz  * NOTE: If the parameter t == LONG_MAX, then we will never split
477feba16e2SMike Kravetz  * a region and possibly return -ENOMEM.  Callers specifying
478feba16e2SMike Kravetz  * t == LONG_MAX do not need to check for -ENOMEM error.
4791dd308a7SMike Kravetz  */
480feba16e2SMike Kravetz static long region_del(struct resv_map *resv, long f, long t)
48196822904SAndy Whitcroft {
4821406ec9bSJoonsoo Kim 	struct list_head *head = &resv->regions;
48396822904SAndy Whitcroft 	struct file_region *rg, *trg;
484feba16e2SMike Kravetz 	struct file_region *nrg = NULL;
485feba16e2SMike Kravetz 	long del = 0;
48696822904SAndy Whitcroft 
487feba16e2SMike Kravetz retry:
4887b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
489feba16e2SMike Kravetz 	list_for_each_entry_safe(rg, trg, head, link) {
490dbe409e4SMike Kravetz 		/*
491dbe409e4SMike Kravetz 		 * Skip regions before the range to be deleted.  file_region
492dbe409e4SMike Kravetz 		 * ranges are normally of the form [from, to).  However, there
493dbe409e4SMike Kravetz 		 * may be a "placeholder" entry in the map which is of the form
494dbe409e4SMike Kravetz 		 * (from, to) with from == to.  Check for placeholder entries
495dbe409e4SMike Kravetz 		 * at the beginning of the range to be deleted.
496dbe409e4SMike Kravetz 		 */
497dbe409e4SMike Kravetz 		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
498feba16e2SMike Kravetz 			continue;
499dbe409e4SMike Kravetz 
500feba16e2SMike Kravetz 		if (rg->from >= t)
50196822904SAndy Whitcroft 			break;
50296822904SAndy Whitcroft 
503feba16e2SMike Kravetz 		if (f > rg->from && t < rg->to) { /* Must split region */
504feba16e2SMike Kravetz 			/*
505feba16e2SMike Kravetz 			 * Check for an entry in the cache before dropping
506feba16e2SMike Kravetz 			 * lock and attempting allocation.
507feba16e2SMike Kravetz 			 */
508feba16e2SMike Kravetz 			if (!nrg &&
509feba16e2SMike Kravetz 			    resv->region_cache_count > resv->adds_in_progress) {
510feba16e2SMike Kravetz 				nrg = list_first_entry(&resv->region_cache,
511feba16e2SMike Kravetz 							struct file_region,
512feba16e2SMike Kravetz 							link);
513feba16e2SMike Kravetz 				list_del(&nrg->link);
514feba16e2SMike Kravetz 				resv->region_cache_count--;
51596822904SAndy Whitcroft 			}
51696822904SAndy Whitcroft 
517feba16e2SMike Kravetz 			if (!nrg) {
518feba16e2SMike Kravetz 				spin_unlock(&resv->lock);
519feba16e2SMike Kravetz 				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
520feba16e2SMike Kravetz 				if (!nrg)
521feba16e2SMike Kravetz 					return -ENOMEM;
522feba16e2SMike Kravetz 				goto retry;
523feba16e2SMike Kravetz 			}
524feba16e2SMike Kravetz 
525feba16e2SMike Kravetz 			del += t - f;
526feba16e2SMike Kravetz 
527feba16e2SMike Kravetz 			/* New entry for end of split region */
528feba16e2SMike Kravetz 			nrg->from = t;
529feba16e2SMike Kravetz 			nrg->to = rg->to;
530feba16e2SMike Kravetz 			INIT_LIST_HEAD(&nrg->link);
531feba16e2SMike Kravetz 
532feba16e2SMike Kravetz 			/* Original entry is trimmed */
533feba16e2SMike Kravetz 			rg->to = f;
534feba16e2SMike Kravetz 
535feba16e2SMike Kravetz 			list_add(&nrg->link, &rg->link);
536feba16e2SMike Kravetz 			nrg = NULL;
53796822904SAndy Whitcroft 			break;
538feba16e2SMike Kravetz 		}
539feba16e2SMike Kravetz 
540feba16e2SMike Kravetz 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
541feba16e2SMike Kravetz 			del += rg->to - rg->from;
54296822904SAndy Whitcroft 			list_del(&rg->link);
54396822904SAndy Whitcroft 			kfree(rg);
544feba16e2SMike Kravetz 			continue;
54596822904SAndy Whitcroft 		}
5467b24d861SDavidlohr Bueso 
547feba16e2SMike Kravetz 		if (f <= rg->from) {	/* Trim beginning of region */
548feba16e2SMike Kravetz 			del += t - rg->from;
549feba16e2SMike Kravetz 			rg->from = t;
550feba16e2SMike Kravetz 		} else {		/* Trim end of region */
551feba16e2SMike Kravetz 			del += rg->to - f;
552feba16e2SMike Kravetz 			rg->to = f;
553feba16e2SMike Kravetz 		}
554feba16e2SMike Kravetz 	}
555feba16e2SMike Kravetz 
5567b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
557feba16e2SMike Kravetz 	kfree(nrg);
558feba16e2SMike Kravetz 	return del;
55996822904SAndy Whitcroft }
56096822904SAndy Whitcroft 
5611dd308a7SMike Kravetz /*
562b5cec28dSMike Kravetz  * A rare out of memory error was encountered which prevented removal of
563b5cec28dSMike Kravetz  * the reserve map region for a page.  The huge page itself was free'ed
564b5cec28dSMike Kravetz  * and removed from the page cache.  This routine will adjust the subpool
565b5cec28dSMike Kravetz  * usage count, and the global reserve count if needed.  By incrementing
566b5cec28dSMike Kravetz  * these counts, the reserve map entry which could not be deleted will
567b5cec28dSMike Kravetz  * appear as a "reserved" entry instead of simply dangling with incorrect
568b5cec28dSMike Kravetz  * counts.
569b5cec28dSMike Kravetz  */
57072e2936cSzhong jiang void hugetlb_fix_reserve_counts(struct inode *inode)
571b5cec28dSMike Kravetz {
572b5cec28dSMike Kravetz 	struct hugepage_subpool *spool = subpool_inode(inode);
573b5cec28dSMike Kravetz 	long rsv_adjust;
574b5cec28dSMike Kravetz 
575b5cec28dSMike Kravetz 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
57672e2936cSzhong jiang 	if (rsv_adjust) {
577b5cec28dSMike Kravetz 		struct hstate *h = hstate_inode(inode);
578b5cec28dSMike Kravetz 
579b5cec28dSMike Kravetz 		hugetlb_acct_memory(h, 1);
580b5cec28dSMike Kravetz 	}
581b5cec28dSMike Kravetz }
582b5cec28dSMike Kravetz 
583b5cec28dSMike Kravetz /*
5841dd308a7SMike Kravetz  * Count and return the number of huge pages in the reserve map
5851dd308a7SMike Kravetz  * that intersect with the range [f, t).
5861dd308a7SMike Kravetz  */
5871406ec9bSJoonsoo Kim static long region_count(struct resv_map *resv, long f, long t)
58884afd99bSAndy Whitcroft {
5891406ec9bSJoonsoo Kim 	struct list_head *head = &resv->regions;
59084afd99bSAndy Whitcroft 	struct file_region *rg;
59184afd99bSAndy Whitcroft 	long chg = 0;
59284afd99bSAndy Whitcroft 
5937b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
59484afd99bSAndy Whitcroft 	/* Locate each segment we overlap with, and count that overlap. */
59584afd99bSAndy Whitcroft 	list_for_each_entry(rg, head, link) {
596f2135a4aSWang Sheng-Hui 		long seg_from;
597f2135a4aSWang Sheng-Hui 		long seg_to;
59884afd99bSAndy Whitcroft 
59984afd99bSAndy Whitcroft 		if (rg->to <= f)
60084afd99bSAndy Whitcroft 			continue;
60184afd99bSAndy Whitcroft 		if (rg->from >= t)
60284afd99bSAndy Whitcroft 			break;
60384afd99bSAndy Whitcroft 
60484afd99bSAndy Whitcroft 		seg_from = max(rg->from, f);
60584afd99bSAndy Whitcroft 		seg_to = min(rg->to, t);
60684afd99bSAndy Whitcroft 
60784afd99bSAndy Whitcroft 		chg += seg_to - seg_from;
60884afd99bSAndy Whitcroft 	}
6097b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
61084afd99bSAndy Whitcroft 
61184afd99bSAndy Whitcroft 	return chg;
61284afd99bSAndy Whitcroft }
61384afd99bSAndy Whitcroft 
61496822904SAndy Whitcroft /*
615e7c4b0bfSAndy Whitcroft  * Convert the address within this vma to the page offset within
616e7c4b0bfSAndy Whitcroft  * the mapping, in pagecache page units; huge pages here.
617e7c4b0bfSAndy Whitcroft  */
618a5516438SAndi Kleen static pgoff_t vma_hugecache_offset(struct hstate *h,
619a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long address)
620e7c4b0bfSAndy Whitcroft {
621a5516438SAndi Kleen 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
622a5516438SAndi Kleen 			(vma->vm_pgoff >> huge_page_order(h));
623e7c4b0bfSAndy Whitcroft }
624e7c4b0bfSAndy Whitcroft 
6250fe6e20bSNaoya Horiguchi pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
6260fe6e20bSNaoya Horiguchi 				     unsigned long address)
6270fe6e20bSNaoya Horiguchi {
6280fe6e20bSNaoya Horiguchi 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
6290fe6e20bSNaoya Horiguchi }
630dee41079SDan Williams EXPORT_SYMBOL_GPL(linear_hugepage_index);
6310fe6e20bSNaoya Horiguchi 
63284afd99bSAndy Whitcroft /*
63308fba699SMel Gorman  * Return the size of the pages allocated when backing a VMA. In the majority
63408fba699SMel Gorman  * cases this will be same size as used by the page table entries.
63508fba699SMel Gorman  */
63608fba699SMel Gorman unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
63708fba699SMel Gorman {
63808fba699SMel Gorman 	struct hstate *hstate;
63908fba699SMel Gorman 
64008fba699SMel Gorman 	if (!is_vm_hugetlb_page(vma))
64108fba699SMel Gorman 		return PAGE_SIZE;
64208fba699SMel Gorman 
64308fba699SMel Gorman 	hstate = hstate_vma(vma);
64408fba699SMel Gorman 
6452415cf12SWanpeng Li 	return 1UL << huge_page_shift(hstate);
64608fba699SMel Gorman }
647f340ca0fSJoerg Roedel EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
64808fba699SMel Gorman 
64908fba699SMel Gorman /*
6503340289dSMel Gorman  * Return the page size being used by the MMU to back a VMA. In the majority
6513340289dSMel Gorman  * of cases, the page size used by the kernel matches the MMU size. On
6523340289dSMel Gorman  * architectures where it differs, an architecture-specific version of this
6533340289dSMel Gorman  * function is required.
6543340289dSMel Gorman  */
6553340289dSMel Gorman #ifndef vma_mmu_pagesize
6563340289dSMel Gorman unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
6573340289dSMel Gorman {
6583340289dSMel Gorman 	return vma_kernel_pagesize(vma);
6593340289dSMel Gorman }
6603340289dSMel Gorman #endif
6613340289dSMel Gorman 
6623340289dSMel Gorman /*
66384afd99bSAndy Whitcroft  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
66484afd99bSAndy Whitcroft  * bits of the reservation map pointer, which are always clear due to
66584afd99bSAndy Whitcroft  * alignment.
66684afd99bSAndy Whitcroft  */
66784afd99bSAndy Whitcroft #define HPAGE_RESV_OWNER    (1UL << 0)
66884afd99bSAndy Whitcroft #define HPAGE_RESV_UNMAPPED (1UL << 1)
66904f2cbe3SMel Gorman #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
67084afd99bSAndy Whitcroft 
671a1e78772SMel Gorman /*
672a1e78772SMel Gorman  * These helpers are used to track how many pages are reserved for
673a1e78772SMel Gorman  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
674a1e78772SMel Gorman  * is guaranteed to have their future faults succeed.
675a1e78772SMel Gorman  *
676a1e78772SMel Gorman  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
677a1e78772SMel Gorman  * the reserve counters are updated with the hugetlb_lock held. It is safe
678a1e78772SMel Gorman  * to reset the VMA at fork() time as it is not in use yet and there is no
679a1e78772SMel Gorman  * chance of the global counters getting corrupted as a result of the values.
68084afd99bSAndy Whitcroft  *
68184afd99bSAndy Whitcroft  * The private mapping reservation is represented in a subtly different
68284afd99bSAndy Whitcroft  * manner to a shared mapping.  A shared mapping has a region map associated
68384afd99bSAndy Whitcroft  * with the underlying file, this region map represents the backing file
68484afd99bSAndy Whitcroft  * pages which have ever had a reservation assigned which this persists even
68584afd99bSAndy Whitcroft  * after the page is instantiated.  A private mapping has a region map
68684afd99bSAndy Whitcroft  * associated with the original mmap which is attached to all VMAs which
68784afd99bSAndy Whitcroft  * reference it, this region map represents those offsets which have consumed
68884afd99bSAndy Whitcroft  * reservation ie. where pages have been instantiated.
689a1e78772SMel Gorman  */
690e7c4b0bfSAndy Whitcroft static unsigned long get_vma_private_data(struct vm_area_struct *vma)
691e7c4b0bfSAndy Whitcroft {
692e7c4b0bfSAndy Whitcroft 	return (unsigned long)vma->vm_private_data;
693e7c4b0bfSAndy Whitcroft }
694e7c4b0bfSAndy Whitcroft 
695e7c4b0bfSAndy Whitcroft static void set_vma_private_data(struct vm_area_struct *vma,
696e7c4b0bfSAndy Whitcroft 							unsigned long value)
697e7c4b0bfSAndy Whitcroft {
698e7c4b0bfSAndy Whitcroft 	vma->vm_private_data = (void *)value;
699e7c4b0bfSAndy Whitcroft }
700e7c4b0bfSAndy Whitcroft 
7019119a41eSJoonsoo Kim struct resv_map *resv_map_alloc(void)
70284afd99bSAndy Whitcroft {
70384afd99bSAndy Whitcroft 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
7045e911373SMike Kravetz 	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
7055e911373SMike Kravetz 
7065e911373SMike Kravetz 	if (!resv_map || !rg) {
7075e911373SMike Kravetz 		kfree(resv_map);
7085e911373SMike Kravetz 		kfree(rg);
70984afd99bSAndy Whitcroft 		return NULL;
7105e911373SMike Kravetz 	}
71184afd99bSAndy Whitcroft 
71284afd99bSAndy Whitcroft 	kref_init(&resv_map->refs);
7137b24d861SDavidlohr Bueso 	spin_lock_init(&resv_map->lock);
71484afd99bSAndy Whitcroft 	INIT_LIST_HEAD(&resv_map->regions);
71584afd99bSAndy Whitcroft 
7165e911373SMike Kravetz 	resv_map->adds_in_progress = 0;
7175e911373SMike Kravetz 
7185e911373SMike Kravetz 	INIT_LIST_HEAD(&resv_map->region_cache);
7195e911373SMike Kravetz 	list_add(&rg->link, &resv_map->region_cache);
7205e911373SMike Kravetz 	resv_map->region_cache_count = 1;
7215e911373SMike Kravetz 
72284afd99bSAndy Whitcroft 	return resv_map;
72384afd99bSAndy Whitcroft }
72484afd99bSAndy Whitcroft 
7259119a41eSJoonsoo Kim void resv_map_release(struct kref *ref)
72684afd99bSAndy Whitcroft {
72784afd99bSAndy Whitcroft 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
7285e911373SMike Kravetz 	struct list_head *head = &resv_map->region_cache;
7295e911373SMike Kravetz 	struct file_region *rg, *trg;
73084afd99bSAndy Whitcroft 
73184afd99bSAndy Whitcroft 	/* Clear out any active regions before we release the map. */
732feba16e2SMike Kravetz 	region_del(resv_map, 0, LONG_MAX);
7335e911373SMike Kravetz 
7345e911373SMike Kravetz 	/* ... and any entries left in the cache */
7355e911373SMike Kravetz 	list_for_each_entry_safe(rg, trg, head, link) {
7365e911373SMike Kravetz 		list_del(&rg->link);
7375e911373SMike Kravetz 		kfree(rg);
7385e911373SMike Kravetz 	}
7395e911373SMike Kravetz 
7405e911373SMike Kravetz 	VM_BUG_ON(resv_map->adds_in_progress);
7415e911373SMike Kravetz 
74284afd99bSAndy Whitcroft 	kfree(resv_map);
74384afd99bSAndy Whitcroft }
74484afd99bSAndy Whitcroft 
7454e35f483SJoonsoo Kim static inline struct resv_map *inode_resv_map(struct inode *inode)
7464e35f483SJoonsoo Kim {
7474e35f483SJoonsoo Kim 	return inode->i_mapping->private_data;
7484e35f483SJoonsoo Kim }
7494e35f483SJoonsoo Kim 
75084afd99bSAndy Whitcroft static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
751a1e78772SMel Gorman {
75281d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
7534e35f483SJoonsoo Kim 	if (vma->vm_flags & VM_MAYSHARE) {
7544e35f483SJoonsoo Kim 		struct address_space *mapping = vma->vm_file->f_mapping;
7554e35f483SJoonsoo Kim 		struct inode *inode = mapping->host;
7564e35f483SJoonsoo Kim 
7574e35f483SJoonsoo Kim 		return inode_resv_map(inode);
7584e35f483SJoonsoo Kim 
7594e35f483SJoonsoo Kim 	} else {
76084afd99bSAndy Whitcroft 		return (struct resv_map *)(get_vma_private_data(vma) &
76184afd99bSAndy Whitcroft 							~HPAGE_RESV_MASK);
7624e35f483SJoonsoo Kim 	}
763a1e78772SMel Gorman }
764a1e78772SMel Gorman 
76584afd99bSAndy Whitcroft static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
766a1e78772SMel Gorman {
76781d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
76881d1b09cSSasha Levin 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
769a1e78772SMel Gorman 
77084afd99bSAndy Whitcroft 	set_vma_private_data(vma, (get_vma_private_data(vma) &
77184afd99bSAndy Whitcroft 				HPAGE_RESV_MASK) | (unsigned long)map);
77204f2cbe3SMel Gorman }
77304f2cbe3SMel Gorman 
77404f2cbe3SMel Gorman static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
77504f2cbe3SMel Gorman {
77681d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
77781d1b09cSSasha Levin 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
778e7c4b0bfSAndy Whitcroft 
779e7c4b0bfSAndy Whitcroft 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
78004f2cbe3SMel Gorman }
78104f2cbe3SMel Gorman 
78204f2cbe3SMel Gorman static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
78304f2cbe3SMel Gorman {
78481d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
785e7c4b0bfSAndy Whitcroft 
786e7c4b0bfSAndy Whitcroft 	return (get_vma_private_data(vma) & flag) != 0;
787a1e78772SMel Gorman }
788a1e78772SMel Gorman 
78904f2cbe3SMel Gorman /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
790a1e78772SMel Gorman void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
791a1e78772SMel Gorman {
79281d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
793f83a275dSMel Gorman 	if (!(vma->vm_flags & VM_MAYSHARE))
794a1e78772SMel Gorman 		vma->vm_private_data = (void *)0;
795a1e78772SMel Gorman }
796a1e78772SMel Gorman 
797a1e78772SMel Gorman /* Returns true if the VMA has associated reserve pages */
798559ec2f8SNicholas Krause static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
799a1e78772SMel Gorman {
800af0ed73eSJoonsoo Kim 	if (vma->vm_flags & VM_NORESERVE) {
801af0ed73eSJoonsoo Kim 		/*
802af0ed73eSJoonsoo Kim 		 * This address is already reserved by other process(chg == 0),
803af0ed73eSJoonsoo Kim 		 * so, we should decrement reserved count. Without decrementing,
804af0ed73eSJoonsoo Kim 		 * reserve count remains after releasing inode, because this
805af0ed73eSJoonsoo Kim 		 * allocated page will go into page cache and is regarded as
806af0ed73eSJoonsoo Kim 		 * coming from reserved pool in releasing step.  Currently, we
807af0ed73eSJoonsoo Kim 		 * don't have any other solution to deal with this situation
808af0ed73eSJoonsoo Kim 		 * properly, so add work-around here.
809af0ed73eSJoonsoo Kim 		 */
810af0ed73eSJoonsoo Kim 		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
811559ec2f8SNicholas Krause 			return true;
812af0ed73eSJoonsoo Kim 		else
813559ec2f8SNicholas Krause 			return false;
814af0ed73eSJoonsoo Kim 	}
815a63884e9SJoonsoo Kim 
816a63884e9SJoonsoo Kim 	/* Shared mappings always use reserves */
8171fb1b0e9SMike Kravetz 	if (vma->vm_flags & VM_MAYSHARE) {
8181fb1b0e9SMike Kravetz 		/*
8191fb1b0e9SMike Kravetz 		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
8201fb1b0e9SMike Kravetz 		 * be a region map for all pages.  The only situation where
8211fb1b0e9SMike Kravetz 		 * there is no region map is if a hole was punched via
8221fb1b0e9SMike Kravetz 		 * fallocate.  In this case, there really are no reverves to
8231fb1b0e9SMike Kravetz 		 * use.  This situation is indicated if chg != 0.
8241fb1b0e9SMike Kravetz 		 */
8251fb1b0e9SMike Kravetz 		if (chg)
8261fb1b0e9SMike Kravetz 			return false;
8271fb1b0e9SMike Kravetz 		else
828559ec2f8SNicholas Krause 			return true;
8291fb1b0e9SMike Kravetz 	}
830a63884e9SJoonsoo Kim 
831a63884e9SJoonsoo Kim 	/*
832a63884e9SJoonsoo Kim 	 * Only the process that called mmap() has reserves for
833a63884e9SJoonsoo Kim 	 * private mappings.
834a63884e9SJoonsoo Kim 	 */
83567961f9dSMike Kravetz 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
83667961f9dSMike Kravetz 		/*
83767961f9dSMike Kravetz 		 * Like the shared case above, a hole punch or truncate
83867961f9dSMike Kravetz 		 * could have been performed on the private mapping.
83967961f9dSMike Kravetz 		 * Examine the value of chg to determine if reserves
84067961f9dSMike Kravetz 		 * actually exist or were previously consumed.
84167961f9dSMike Kravetz 		 * Very Subtle - The value of chg comes from a previous
84267961f9dSMike Kravetz 		 * call to vma_needs_reserves().  The reserve map for
84367961f9dSMike Kravetz 		 * private mappings has different (opposite) semantics
84467961f9dSMike Kravetz 		 * than that of shared mappings.  vma_needs_reserves()
84567961f9dSMike Kravetz 		 * has already taken this difference in semantics into
84667961f9dSMike Kravetz 		 * account.  Therefore, the meaning of chg is the same
84767961f9dSMike Kravetz 		 * as in the shared case above.  Code could easily be
84867961f9dSMike Kravetz 		 * combined, but keeping it separate draws attention to
84967961f9dSMike Kravetz 		 * subtle differences.
85067961f9dSMike Kravetz 		 */
85167961f9dSMike Kravetz 		if (chg)
85267961f9dSMike Kravetz 			return false;
85367961f9dSMike Kravetz 		else
854559ec2f8SNicholas Krause 			return true;
85567961f9dSMike Kravetz 	}
856a63884e9SJoonsoo Kim 
857559ec2f8SNicholas Krause 	return false;
858a1e78772SMel Gorman }
859a1e78772SMel Gorman 
860a5516438SAndi Kleen static void enqueue_huge_page(struct hstate *h, struct page *page)
8611da177e4SLinus Torvalds {
8621da177e4SLinus Torvalds 	int nid = page_to_nid(page);
8630edaecfaSAneesh Kumar K.V 	list_move(&page->lru, &h->hugepage_freelists[nid]);
864a5516438SAndi Kleen 	h->free_huge_pages++;
865a5516438SAndi Kleen 	h->free_huge_pages_node[nid]++;
8661da177e4SLinus Torvalds }
8671da177e4SLinus Torvalds 
868bf50bab2SNaoya Horiguchi static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
869bf50bab2SNaoya Horiguchi {
870bf50bab2SNaoya Horiguchi 	struct page *page;
871bf50bab2SNaoya Horiguchi 
872c8721bbbSNaoya Horiguchi 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
873c8721bbbSNaoya Horiguchi 		if (!is_migrate_isolate_page(page))
874c8721bbbSNaoya Horiguchi 			break;
875c8721bbbSNaoya Horiguchi 	/*
876c8721bbbSNaoya Horiguchi 	 * if 'non-isolated free hugepage' not found on the list,
877c8721bbbSNaoya Horiguchi 	 * the allocation fails.
878c8721bbbSNaoya Horiguchi 	 */
879c8721bbbSNaoya Horiguchi 	if (&h->hugepage_freelists[nid] == &page->lru)
880bf50bab2SNaoya Horiguchi 		return NULL;
8810edaecfaSAneesh Kumar K.V 	list_move(&page->lru, &h->hugepage_activelist);
882a9869b83SNaoya Horiguchi 	set_page_refcounted(page);
883bf50bab2SNaoya Horiguchi 	h->free_huge_pages--;
884bf50bab2SNaoya Horiguchi 	h->free_huge_pages_node[nid]--;
885bf50bab2SNaoya Horiguchi 	return page;
886bf50bab2SNaoya Horiguchi }
887bf50bab2SNaoya Horiguchi 
88886cdb465SNaoya Horiguchi /* Movability of hugepages depends on migration support. */
88986cdb465SNaoya Horiguchi static inline gfp_t htlb_alloc_mask(struct hstate *h)
89086cdb465SNaoya Horiguchi {
891100873d7SNaoya Horiguchi 	if (hugepages_treat_as_movable || hugepage_migration_supported(h))
89286cdb465SNaoya Horiguchi 		return GFP_HIGHUSER_MOVABLE;
89386cdb465SNaoya Horiguchi 	else
89486cdb465SNaoya Horiguchi 		return GFP_HIGHUSER;
89586cdb465SNaoya Horiguchi }
89686cdb465SNaoya Horiguchi 
897a5516438SAndi Kleen static struct page *dequeue_huge_page_vma(struct hstate *h,
898a5516438SAndi Kleen 				struct vm_area_struct *vma,
899af0ed73eSJoonsoo Kim 				unsigned long address, int avoid_reserve,
900af0ed73eSJoonsoo Kim 				long chg)
9011da177e4SLinus Torvalds {
902b1c12cbcSKonstantin Khlebnikov 	struct page *page = NULL;
903480eccf9SLee Schermerhorn 	struct mempolicy *mpol;
90419770b32SMel Gorman 	nodemask_t *nodemask;
905c0ff7453SMiao Xie 	struct zonelist *zonelist;
906dd1a239fSMel Gorman 	struct zone *zone;
907dd1a239fSMel Gorman 	struct zoneref *z;
908cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
9091da177e4SLinus Torvalds 
910a1e78772SMel Gorman 	/*
911a1e78772SMel Gorman 	 * A child process with MAP_PRIVATE mappings created by their parent
912a1e78772SMel Gorman 	 * have no page reserves. This check ensures that reservations are
913a1e78772SMel Gorman 	 * not "stolen". The child may still get SIGKILLed
914a1e78772SMel Gorman 	 */
915af0ed73eSJoonsoo Kim 	if (!vma_has_reserves(vma, chg) &&
916a5516438SAndi Kleen 			h->free_huge_pages - h->resv_huge_pages == 0)
917c0ff7453SMiao Xie 		goto err;
918a1e78772SMel Gorman 
91904f2cbe3SMel Gorman 	/* If reserves cannot be used, ensure enough pages are in the pool */
920a5516438SAndi Kleen 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
9216eab04a8SJustin P. Mattock 		goto err;
92204f2cbe3SMel Gorman 
9239966c4bbSJoonsoo Kim retry_cpuset:
924d26914d1SMel Gorman 	cpuset_mems_cookie = read_mems_allowed_begin();
9259966c4bbSJoonsoo Kim 	zonelist = huge_zonelist(vma, address,
92686cdb465SNaoya Horiguchi 					htlb_alloc_mask(h), &mpol, &nodemask);
9279966c4bbSJoonsoo Kim 
92819770b32SMel Gorman 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
92919770b32SMel Gorman 						MAX_NR_ZONES - 1, nodemask) {
930344736f2SVladimir Davydov 		if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
931bf50bab2SNaoya Horiguchi 			page = dequeue_huge_page_node(h, zone_to_nid(zone));
932bf50bab2SNaoya Horiguchi 			if (page) {
933af0ed73eSJoonsoo Kim 				if (avoid_reserve)
934af0ed73eSJoonsoo Kim 					break;
935af0ed73eSJoonsoo Kim 				if (!vma_has_reserves(vma, chg))
936af0ed73eSJoonsoo Kim 					break;
937af0ed73eSJoonsoo Kim 
93807443a85SJoonsoo Kim 				SetPagePrivate(page);
939a63884e9SJoonsoo Kim 				h->resv_huge_pages--;
9405ab3ee7bSKen Chen 				break;
9411da177e4SLinus Torvalds 			}
9423abf7afdSAndrew Morton 		}
943bf50bab2SNaoya Horiguchi 	}
944cc9a6c87SMel Gorman 
945cc9a6c87SMel Gorman 	mpol_cond_put(mpol);
946d26914d1SMel Gorman 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
947cc9a6c87SMel Gorman 		goto retry_cpuset;
948cc9a6c87SMel Gorman 	return page;
949cc9a6c87SMel Gorman 
950c0ff7453SMiao Xie err:
951cc9a6c87SMel Gorman 	return NULL;
9521da177e4SLinus Torvalds }
9531da177e4SLinus Torvalds 
9541cac6f2cSLuiz Capitulino /*
9551cac6f2cSLuiz Capitulino  * common helper functions for hstate_next_node_to_{alloc|free}.
9561cac6f2cSLuiz Capitulino  * We may have allocated or freed a huge page based on a different
9571cac6f2cSLuiz Capitulino  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
9581cac6f2cSLuiz Capitulino  * be outside of *nodes_allowed.  Ensure that we use an allowed
9591cac6f2cSLuiz Capitulino  * node for alloc or free.
9601cac6f2cSLuiz Capitulino  */
9611cac6f2cSLuiz Capitulino static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
9621cac6f2cSLuiz Capitulino {
9630edaf86cSAndrew Morton 	nid = next_node_in(nid, *nodes_allowed);
9641cac6f2cSLuiz Capitulino 	VM_BUG_ON(nid >= MAX_NUMNODES);
9651cac6f2cSLuiz Capitulino 
9661cac6f2cSLuiz Capitulino 	return nid;
9671cac6f2cSLuiz Capitulino }
9681cac6f2cSLuiz Capitulino 
9691cac6f2cSLuiz Capitulino static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
9701cac6f2cSLuiz Capitulino {
9711cac6f2cSLuiz Capitulino 	if (!node_isset(nid, *nodes_allowed))
9721cac6f2cSLuiz Capitulino 		nid = next_node_allowed(nid, nodes_allowed);
9731cac6f2cSLuiz Capitulino 	return nid;
9741cac6f2cSLuiz Capitulino }
9751cac6f2cSLuiz Capitulino 
9761cac6f2cSLuiz Capitulino /*
9771cac6f2cSLuiz Capitulino  * returns the previously saved node ["this node"] from which to
9781cac6f2cSLuiz Capitulino  * allocate a persistent huge page for the pool and advance the
9791cac6f2cSLuiz Capitulino  * next node from which to allocate, handling wrap at end of node
9801cac6f2cSLuiz Capitulino  * mask.
9811cac6f2cSLuiz Capitulino  */
9821cac6f2cSLuiz Capitulino static int hstate_next_node_to_alloc(struct hstate *h,
9831cac6f2cSLuiz Capitulino 					nodemask_t *nodes_allowed)
9841cac6f2cSLuiz Capitulino {
9851cac6f2cSLuiz Capitulino 	int nid;
9861cac6f2cSLuiz Capitulino 
9871cac6f2cSLuiz Capitulino 	VM_BUG_ON(!nodes_allowed);
9881cac6f2cSLuiz Capitulino 
9891cac6f2cSLuiz Capitulino 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
9901cac6f2cSLuiz Capitulino 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
9911cac6f2cSLuiz Capitulino 
9921cac6f2cSLuiz Capitulino 	return nid;
9931cac6f2cSLuiz Capitulino }
9941cac6f2cSLuiz Capitulino 
9951cac6f2cSLuiz Capitulino /*
9961cac6f2cSLuiz Capitulino  * helper for free_pool_huge_page() - return the previously saved
9971cac6f2cSLuiz Capitulino  * node ["this node"] from which to free a huge page.  Advance the
9981cac6f2cSLuiz Capitulino  * next node id whether or not we find a free huge page to free so
9991cac6f2cSLuiz Capitulino  * that the next attempt to free addresses the next node.
10001cac6f2cSLuiz Capitulino  */
10011cac6f2cSLuiz Capitulino static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
10021cac6f2cSLuiz Capitulino {
10031cac6f2cSLuiz Capitulino 	int nid;
10041cac6f2cSLuiz Capitulino 
10051cac6f2cSLuiz Capitulino 	VM_BUG_ON(!nodes_allowed);
10061cac6f2cSLuiz Capitulino 
10071cac6f2cSLuiz Capitulino 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
10081cac6f2cSLuiz Capitulino 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
10091cac6f2cSLuiz Capitulino 
10101cac6f2cSLuiz Capitulino 	return nid;
10111cac6f2cSLuiz Capitulino }
10121cac6f2cSLuiz Capitulino 
10131cac6f2cSLuiz Capitulino #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
10141cac6f2cSLuiz Capitulino 	for (nr_nodes = nodes_weight(*mask);				\
10151cac6f2cSLuiz Capitulino 		nr_nodes > 0 &&						\
10161cac6f2cSLuiz Capitulino 		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
10171cac6f2cSLuiz Capitulino 		nr_nodes--)
10181cac6f2cSLuiz Capitulino 
10191cac6f2cSLuiz Capitulino #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
10201cac6f2cSLuiz Capitulino 	for (nr_nodes = nodes_weight(*mask);				\
10211cac6f2cSLuiz Capitulino 		nr_nodes > 0 &&						\
10221cac6f2cSLuiz Capitulino 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
10231cac6f2cSLuiz Capitulino 		nr_nodes--)
10241cac6f2cSLuiz Capitulino 
1025461a7184SYisheng Xie #if defined(CONFIG_ARCH_HAS_GIGANTIC_PAGE) && \
1026d08de8e2SGerald Schaefer 	((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || \
1027d08de8e2SGerald Schaefer 	defined(CONFIG_CMA))
1028944d9fecSLuiz Capitulino static void destroy_compound_gigantic_page(struct page *page,
1029d00181b9SKirill A. Shutemov 					unsigned int order)
1030944d9fecSLuiz Capitulino {
1031944d9fecSLuiz Capitulino 	int i;
1032944d9fecSLuiz Capitulino 	int nr_pages = 1 << order;
1033944d9fecSLuiz Capitulino 	struct page *p = page + 1;
1034944d9fecSLuiz Capitulino 
1035c8cc708aSGerald Schaefer 	atomic_set(compound_mapcount_ptr(page), 0);
1036944d9fecSLuiz Capitulino 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
10371d798ca3SKirill A. Shutemov 		clear_compound_head(p);
1038944d9fecSLuiz Capitulino 		set_page_refcounted(p);
1039944d9fecSLuiz Capitulino 	}
1040944d9fecSLuiz Capitulino 
1041944d9fecSLuiz Capitulino 	set_compound_order(page, 0);
1042944d9fecSLuiz Capitulino 	__ClearPageHead(page);
1043944d9fecSLuiz Capitulino }
1044944d9fecSLuiz Capitulino 
1045d00181b9SKirill A. Shutemov static void free_gigantic_page(struct page *page, unsigned int order)
1046944d9fecSLuiz Capitulino {
1047944d9fecSLuiz Capitulino 	free_contig_range(page_to_pfn(page), 1 << order);
1048944d9fecSLuiz Capitulino }
1049944d9fecSLuiz Capitulino 
1050944d9fecSLuiz Capitulino static int __alloc_gigantic_page(unsigned long start_pfn,
1051944d9fecSLuiz Capitulino 				unsigned long nr_pages)
1052944d9fecSLuiz Capitulino {
1053944d9fecSLuiz Capitulino 	unsigned long end_pfn = start_pfn + nr_pages;
1054944d9fecSLuiz Capitulino 	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1055944d9fecSLuiz Capitulino }
1056944d9fecSLuiz Capitulino 
1057f44b2ddaSJoonsoo Kim static bool pfn_range_valid_gigantic(struct zone *z,
1058f44b2ddaSJoonsoo Kim 			unsigned long start_pfn, unsigned long nr_pages)
1059944d9fecSLuiz Capitulino {
1060944d9fecSLuiz Capitulino 	unsigned long i, end_pfn = start_pfn + nr_pages;
1061944d9fecSLuiz Capitulino 	struct page *page;
1062944d9fecSLuiz Capitulino 
1063944d9fecSLuiz Capitulino 	for (i = start_pfn; i < end_pfn; i++) {
1064944d9fecSLuiz Capitulino 		if (!pfn_valid(i))
1065944d9fecSLuiz Capitulino 			return false;
1066944d9fecSLuiz Capitulino 
1067944d9fecSLuiz Capitulino 		page = pfn_to_page(i);
1068944d9fecSLuiz Capitulino 
1069f44b2ddaSJoonsoo Kim 		if (page_zone(page) != z)
1070f44b2ddaSJoonsoo Kim 			return false;
1071f44b2ddaSJoonsoo Kim 
1072944d9fecSLuiz Capitulino 		if (PageReserved(page))
1073944d9fecSLuiz Capitulino 			return false;
1074944d9fecSLuiz Capitulino 
1075944d9fecSLuiz Capitulino 		if (page_count(page) > 0)
1076944d9fecSLuiz Capitulino 			return false;
1077944d9fecSLuiz Capitulino 
1078944d9fecSLuiz Capitulino 		if (PageHuge(page))
1079944d9fecSLuiz Capitulino 			return false;
1080944d9fecSLuiz Capitulino 	}
1081944d9fecSLuiz Capitulino 
1082944d9fecSLuiz Capitulino 	return true;
1083944d9fecSLuiz Capitulino }
1084944d9fecSLuiz Capitulino 
1085944d9fecSLuiz Capitulino static bool zone_spans_last_pfn(const struct zone *zone,
1086944d9fecSLuiz Capitulino 			unsigned long start_pfn, unsigned long nr_pages)
1087944d9fecSLuiz Capitulino {
1088944d9fecSLuiz Capitulino 	unsigned long last_pfn = start_pfn + nr_pages - 1;
1089944d9fecSLuiz Capitulino 	return zone_spans_pfn(zone, last_pfn);
1090944d9fecSLuiz Capitulino }
1091944d9fecSLuiz Capitulino 
1092d00181b9SKirill A. Shutemov static struct page *alloc_gigantic_page(int nid, unsigned int order)
1093944d9fecSLuiz Capitulino {
1094944d9fecSLuiz Capitulino 	unsigned long nr_pages = 1 << order;
1095944d9fecSLuiz Capitulino 	unsigned long ret, pfn, flags;
1096944d9fecSLuiz Capitulino 	struct zone *z;
1097944d9fecSLuiz Capitulino 
1098944d9fecSLuiz Capitulino 	z = NODE_DATA(nid)->node_zones;
1099944d9fecSLuiz Capitulino 	for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1100944d9fecSLuiz Capitulino 		spin_lock_irqsave(&z->lock, flags);
1101944d9fecSLuiz Capitulino 
1102944d9fecSLuiz Capitulino 		pfn = ALIGN(z->zone_start_pfn, nr_pages);
1103944d9fecSLuiz Capitulino 		while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1104f44b2ddaSJoonsoo Kim 			if (pfn_range_valid_gigantic(z, pfn, nr_pages)) {
1105944d9fecSLuiz Capitulino 				/*
1106944d9fecSLuiz Capitulino 				 * We release the zone lock here because
1107944d9fecSLuiz Capitulino 				 * alloc_contig_range() will also lock the zone
1108944d9fecSLuiz Capitulino 				 * at some point. If there's an allocation
1109944d9fecSLuiz Capitulino 				 * spinning on this lock, it may win the race
1110944d9fecSLuiz Capitulino 				 * and cause alloc_contig_range() to fail...
1111944d9fecSLuiz Capitulino 				 */
1112944d9fecSLuiz Capitulino 				spin_unlock_irqrestore(&z->lock, flags);
1113944d9fecSLuiz Capitulino 				ret = __alloc_gigantic_page(pfn, nr_pages);
1114944d9fecSLuiz Capitulino 				if (!ret)
1115944d9fecSLuiz Capitulino 					return pfn_to_page(pfn);
1116944d9fecSLuiz Capitulino 				spin_lock_irqsave(&z->lock, flags);
1117944d9fecSLuiz Capitulino 			}
1118944d9fecSLuiz Capitulino 			pfn += nr_pages;
1119944d9fecSLuiz Capitulino 		}
1120944d9fecSLuiz Capitulino 
1121944d9fecSLuiz Capitulino 		spin_unlock_irqrestore(&z->lock, flags);
1122944d9fecSLuiz Capitulino 	}
1123944d9fecSLuiz Capitulino 
1124944d9fecSLuiz Capitulino 	return NULL;
1125944d9fecSLuiz Capitulino }
1126944d9fecSLuiz Capitulino 
1127944d9fecSLuiz Capitulino static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1128d00181b9SKirill A. Shutemov static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1129944d9fecSLuiz Capitulino 
1130944d9fecSLuiz Capitulino static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1131944d9fecSLuiz Capitulino {
1132944d9fecSLuiz Capitulino 	struct page *page;
1133944d9fecSLuiz Capitulino 
1134944d9fecSLuiz Capitulino 	page = alloc_gigantic_page(nid, huge_page_order(h));
1135944d9fecSLuiz Capitulino 	if (page) {
1136944d9fecSLuiz Capitulino 		prep_compound_gigantic_page(page, huge_page_order(h));
1137944d9fecSLuiz Capitulino 		prep_new_huge_page(h, page, nid);
1138944d9fecSLuiz Capitulino 	}
1139944d9fecSLuiz Capitulino 
1140944d9fecSLuiz Capitulino 	return page;
1141944d9fecSLuiz Capitulino }
1142944d9fecSLuiz Capitulino 
1143944d9fecSLuiz Capitulino static int alloc_fresh_gigantic_page(struct hstate *h,
1144944d9fecSLuiz Capitulino 				nodemask_t *nodes_allowed)
1145944d9fecSLuiz Capitulino {
1146944d9fecSLuiz Capitulino 	struct page *page = NULL;
1147944d9fecSLuiz Capitulino 	int nr_nodes, node;
1148944d9fecSLuiz Capitulino 
1149944d9fecSLuiz Capitulino 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1150944d9fecSLuiz Capitulino 		page = alloc_fresh_gigantic_page_node(h, node);
1151944d9fecSLuiz Capitulino 		if (page)
1152944d9fecSLuiz Capitulino 			return 1;
1153944d9fecSLuiz Capitulino 	}
1154944d9fecSLuiz Capitulino 
1155944d9fecSLuiz Capitulino 	return 0;
1156944d9fecSLuiz Capitulino }
1157944d9fecSLuiz Capitulino 
1158944d9fecSLuiz Capitulino static inline bool gigantic_page_supported(void) { return true; }
1159944d9fecSLuiz Capitulino #else
1160944d9fecSLuiz Capitulino static inline bool gigantic_page_supported(void) { return false; }
1161d00181b9SKirill A. Shutemov static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1162944d9fecSLuiz Capitulino static inline void destroy_compound_gigantic_page(struct page *page,
1163d00181b9SKirill A. Shutemov 						unsigned int order) { }
1164944d9fecSLuiz Capitulino static inline int alloc_fresh_gigantic_page(struct hstate *h,
1165944d9fecSLuiz Capitulino 					nodemask_t *nodes_allowed) { return 0; }
1166944d9fecSLuiz Capitulino #endif
1167944d9fecSLuiz Capitulino 
1168a5516438SAndi Kleen static void update_and_free_page(struct hstate *h, struct page *page)
11696af2acb6SAdam Litke {
11706af2acb6SAdam Litke 	int i;
1171a5516438SAndi Kleen 
1172944d9fecSLuiz Capitulino 	if (hstate_is_gigantic(h) && !gigantic_page_supported())
1173944d9fecSLuiz Capitulino 		return;
117418229df5SAndy Whitcroft 
1175a5516438SAndi Kleen 	h->nr_huge_pages--;
1176a5516438SAndi Kleen 	h->nr_huge_pages_node[page_to_nid(page)]--;
1177a5516438SAndi Kleen 	for (i = 0; i < pages_per_huge_page(h); i++) {
117832f84528SChris Forbes 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
117932f84528SChris Forbes 				1 << PG_referenced | 1 << PG_dirty |
1180a7407a27SLuiz Capitulino 				1 << PG_active | 1 << PG_private |
1181a7407a27SLuiz Capitulino 				1 << PG_writeback);
11826af2acb6SAdam Litke 	}
1183309381feSSasha Levin 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1184f1e61557SKirill A. Shutemov 	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
11856af2acb6SAdam Litke 	set_page_refcounted(page);
1186944d9fecSLuiz Capitulino 	if (hstate_is_gigantic(h)) {
1187944d9fecSLuiz Capitulino 		destroy_compound_gigantic_page(page, huge_page_order(h));
1188944d9fecSLuiz Capitulino 		free_gigantic_page(page, huge_page_order(h));
1189944d9fecSLuiz Capitulino 	} else {
1190a5516438SAndi Kleen 		__free_pages(page, huge_page_order(h));
11916af2acb6SAdam Litke 	}
1192944d9fecSLuiz Capitulino }
11936af2acb6SAdam Litke 
1194e5ff2159SAndi Kleen struct hstate *size_to_hstate(unsigned long size)
1195e5ff2159SAndi Kleen {
1196e5ff2159SAndi Kleen 	struct hstate *h;
1197e5ff2159SAndi Kleen 
1198e5ff2159SAndi Kleen 	for_each_hstate(h) {
1199e5ff2159SAndi Kleen 		if (huge_page_size(h) == size)
1200e5ff2159SAndi Kleen 			return h;
1201e5ff2159SAndi Kleen 	}
1202e5ff2159SAndi Kleen 	return NULL;
1203e5ff2159SAndi Kleen }
1204e5ff2159SAndi Kleen 
1205bcc54222SNaoya Horiguchi /*
1206bcc54222SNaoya Horiguchi  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1207bcc54222SNaoya Horiguchi  * to hstate->hugepage_activelist.)
1208bcc54222SNaoya Horiguchi  *
1209bcc54222SNaoya Horiguchi  * This function can be called for tail pages, but never returns true for them.
1210bcc54222SNaoya Horiguchi  */
1211bcc54222SNaoya Horiguchi bool page_huge_active(struct page *page)
1212bcc54222SNaoya Horiguchi {
1213bcc54222SNaoya Horiguchi 	VM_BUG_ON_PAGE(!PageHuge(page), page);
1214bcc54222SNaoya Horiguchi 	return PageHead(page) && PagePrivate(&page[1]);
1215bcc54222SNaoya Horiguchi }
1216bcc54222SNaoya Horiguchi 
1217bcc54222SNaoya Horiguchi /* never called for tail page */
1218bcc54222SNaoya Horiguchi static void set_page_huge_active(struct page *page)
1219bcc54222SNaoya Horiguchi {
1220bcc54222SNaoya Horiguchi 	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1221bcc54222SNaoya Horiguchi 	SetPagePrivate(&page[1]);
1222bcc54222SNaoya Horiguchi }
1223bcc54222SNaoya Horiguchi 
1224bcc54222SNaoya Horiguchi static void clear_page_huge_active(struct page *page)
1225bcc54222SNaoya Horiguchi {
1226bcc54222SNaoya Horiguchi 	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1227bcc54222SNaoya Horiguchi 	ClearPagePrivate(&page[1]);
1228bcc54222SNaoya Horiguchi }
1229bcc54222SNaoya Horiguchi 
12308f1d26d0SAtsushi Kumagai void free_huge_page(struct page *page)
123127a85ef1SDavid Gibson {
1232a5516438SAndi Kleen 	/*
1233a5516438SAndi Kleen 	 * Can't pass hstate in here because it is called from the
1234a5516438SAndi Kleen 	 * compound page destructor.
1235a5516438SAndi Kleen 	 */
1236e5ff2159SAndi Kleen 	struct hstate *h = page_hstate(page);
12377893d1d5SAdam Litke 	int nid = page_to_nid(page);
123890481622SDavid Gibson 	struct hugepage_subpool *spool =
123990481622SDavid Gibson 		(struct hugepage_subpool *)page_private(page);
124007443a85SJoonsoo Kim 	bool restore_reserve;
124127a85ef1SDavid Gibson 
1242e5df70abSAndy Whitcroft 	set_page_private(page, 0);
124323be7468SMel Gorman 	page->mapping = NULL;
1244b4330afbSMike Kravetz 	VM_BUG_ON_PAGE(page_count(page), page);
1245b4330afbSMike Kravetz 	VM_BUG_ON_PAGE(page_mapcount(page), page);
124607443a85SJoonsoo Kim 	restore_reserve = PagePrivate(page);
124716c794b4SJoonsoo Kim 	ClearPagePrivate(page);
124827a85ef1SDavid Gibson 
12491c5ecae3SMike Kravetz 	/*
12501c5ecae3SMike Kravetz 	 * A return code of zero implies that the subpool will be under its
12511c5ecae3SMike Kravetz 	 * minimum size if the reservation is not restored after page is free.
12521c5ecae3SMike Kravetz 	 * Therefore, force restore_reserve operation.
12531c5ecae3SMike Kravetz 	 */
12541c5ecae3SMike Kravetz 	if (hugepage_subpool_put_pages(spool, 1) == 0)
12551c5ecae3SMike Kravetz 		restore_reserve = true;
12561c5ecae3SMike Kravetz 
125727a85ef1SDavid Gibson 	spin_lock(&hugetlb_lock);
1258bcc54222SNaoya Horiguchi 	clear_page_huge_active(page);
12596d76dcf4SAneesh Kumar K.V 	hugetlb_cgroup_uncharge_page(hstate_index(h),
12606d76dcf4SAneesh Kumar K.V 				     pages_per_huge_page(h), page);
126107443a85SJoonsoo Kim 	if (restore_reserve)
126207443a85SJoonsoo Kim 		h->resv_huge_pages++;
126307443a85SJoonsoo Kim 
1264944d9fecSLuiz Capitulino 	if (h->surplus_huge_pages_node[nid]) {
12650edaecfaSAneesh Kumar K.V 		/* remove the page from active list */
12660edaecfaSAneesh Kumar K.V 		list_del(&page->lru);
1267a5516438SAndi Kleen 		update_and_free_page(h, page);
1268a5516438SAndi Kleen 		h->surplus_huge_pages--;
1269a5516438SAndi Kleen 		h->surplus_huge_pages_node[nid]--;
12707893d1d5SAdam Litke 	} else {
12715d3a551cSWill Deacon 		arch_clear_hugepage_flags(page);
1272a5516438SAndi Kleen 		enqueue_huge_page(h, page);
12737893d1d5SAdam Litke 	}
127427a85ef1SDavid Gibson 	spin_unlock(&hugetlb_lock);
127527a85ef1SDavid Gibson }
127627a85ef1SDavid Gibson 
1277a5516438SAndi Kleen static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1278b7ba30c6SAndi Kleen {
12790edaecfaSAneesh Kumar K.V 	INIT_LIST_HEAD(&page->lru);
1280f1e61557SKirill A. Shutemov 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1281b7ba30c6SAndi Kleen 	spin_lock(&hugetlb_lock);
12829dd540e2SAneesh Kumar K.V 	set_hugetlb_cgroup(page, NULL);
1283a5516438SAndi Kleen 	h->nr_huge_pages++;
1284a5516438SAndi Kleen 	h->nr_huge_pages_node[nid]++;
1285b7ba30c6SAndi Kleen 	spin_unlock(&hugetlb_lock);
1286b7ba30c6SAndi Kleen 	put_page(page); /* free it into the hugepage allocator */
1287b7ba30c6SAndi Kleen }
1288b7ba30c6SAndi Kleen 
1289d00181b9SKirill A. Shutemov static void prep_compound_gigantic_page(struct page *page, unsigned int order)
129020a0307cSWu Fengguang {
129120a0307cSWu Fengguang 	int i;
129220a0307cSWu Fengguang 	int nr_pages = 1 << order;
129320a0307cSWu Fengguang 	struct page *p = page + 1;
129420a0307cSWu Fengguang 
129520a0307cSWu Fengguang 	/* we rely on prep_new_huge_page to set the destructor */
129620a0307cSWu Fengguang 	set_compound_order(page, order);
1297ef5a22beSAndrea Arcangeli 	__ClearPageReserved(page);
1298de09d31dSKirill A. Shutemov 	__SetPageHead(page);
129920a0307cSWu Fengguang 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1300ef5a22beSAndrea Arcangeli 		/*
1301ef5a22beSAndrea Arcangeli 		 * For gigantic hugepages allocated through bootmem at
1302ef5a22beSAndrea Arcangeli 		 * boot, it's safer to be consistent with the not-gigantic
1303ef5a22beSAndrea Arcangeli 		 * hugepages and clear the PG_reserved bit from all tail pages
1304ef5a22beSAndrea Arcangeli 		 * too.  Otherwse drivers using get_user_pages() to access tail
1305ef5a22beSAndrea Arcangeli 		 * pages may get the reference counting wrong if they see
1306ef5a22beSAndrea Arcangeli 		 * PG_reserved set on a tail page (despite the head page not
1307ef5a22beSAndrea Arcangeli 		 * having PG_reserved set).  Enforcing this consistency between
1308ef5a22beSAndrea Arcangeli 		 * head and tail pages allows drivers to optimize away a check
1309ef5a22beSAndrea Arcangeli 		 * on the head page when they need know if put_page() is needed
1310ef5a22beSAndrea Arcangeli 		 * after get_user_pages().
1311ef5a22beSAndrea Arcangeli 		 */
1312ef5a22beSAndrea Arcangeli 		__ClearPageReserved(p);
131358a84aa9SYouquan Song 		set_page_count(p, 0);
13141d798ca3SKirill A. Shutemov 		set_compound_head(p, page);
131520a0307cSWu Fengguang 	}
1316b4330afbSMike Kravetz 	atomic_set(compound_mapcount_ptr(page), -1);
131720a0307cSWu Fengguang }
131820a0307cSWu Fengguang 
13197795912cSAndrew Morton /*
13207795912cSAndrew Morton  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
13217795912cSAndrew Morton  * transparent huge pages.  See the PageTransHuge() documentation for more
13227795912cSAndrew Morton  * details.
13237795912cSAndrew Morton  */
132420a0307cSWu Fengguang int PageHuge(struct page *page)
132520a0307cSWu Fengguang {
132620a0307cSWu Fengguang 	if (!PageCompound(page))
132720a0307cSWu Fengguang 		return 0;
132820a0307cSWu Fengguang 
132920a0307cSWu Fengguang 	page = compound_head(page);
1330f1e61557SKirill A. Shutemov 	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
133120a0307cSWu Fengguang }
133243131e14SNaoya Horiguchi EXPORT_SYMBOL_GPL(PageHuge);
133343131e14SNaoya Horiguchi 
133427c73ae7SAndrea Arcangeli /*
133527c73ae7SAndrea Arcangeli  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
133627c73ae7SAndrea Arcangeli  * normal or transparent huge pages.
133727c73ae7SAndrea Arcangeli  */
133827c73ae7SAndrea Arcangeli int PageHeadHuge(struct page *page_head)
133927c73ae7SAndrea Arcangeli {
134027c73ae7SAndrea Arcangeli 	if (!PageHead(page_head))
134127c73ae7SAndrea Arcangeli 		return 0;
134227c73ae7SAndrea Arcangeli 
1343758f66a2SAndrew Morton 	return get_compound_page_dtor(page_head) == free_huge_page;
134427c73ae7SAndrea Arcangeli }
134527c73ae7SAndrea Arcangeli 
134613d60f4bSZhang Yi pgoff_t __basepage_index(struct page *page)
134713d60f4bSZhang Yi {
134813d60f4bSZhang Yi 	struct page *page_head = compound_head(page);
134913d60f4bSZhang Yi 	pgoff_t index = page_index(page_head);
135013d60f4bSZhang Yi 	unsigned long compound_idx;
135113d60f4bSZhang Yi 
135213d60f4bSZhang Yi 	if (!PageHuge(page_head))
135313d60f4bSZhang Yi 		return page_index(page);
135413d60f4bSZhang Yi 
135513d60f4bSZhang Yi 	if (compound_order(page_head) >= MAX_ORDER)
135613d60f4bSZhang Yi 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
135713d60f4bSZhang Yi 	else
135813d60f4bSZhang Yi 		compound_idx = page - page_head;
135913d60f4bSZhang Yi 
136013d60f4bSZhang Yi 	return (index << compound_order(page_head)) + compound_idx;
136113d60f4bSZhang Yi }
136213d60f4bSZhang Yi 
1363a5516438SAndi Kleen static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
13641da177e4SLinus Torvalds {
13651da177e4SLinus Torvalds 	struct page *page;
1366f96efd58SJoe Jin 
136796db800fSVlastimil Babka 	page = __alloc_pages_node(nid,
136886cdb465SNaoya Horiguchi 		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1369551883aeSNishanth Aravamudan 						__GFP_REPEAT|__GFP_NOWARN,
1370a5516438SAndi Kleen 		huge_page_order(h));
13711da177e4SLinus Torvalds 	if (page) {
1372a5516438SAndi Kleen 		prep_new_huge_page(h, page, nid);
13731da177e4SLinus Torvalds 	}
137463b4613cSNishanth Aravamudan 
137563b4613cSNishanth Aravamudan 	return page;
137663b4613cSNishanth Aravamudan }
137763b4613cSNishanth Aravamudan 
1378b2261026SJoonsoo Kim static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1379b2261026SJoonsoo Kim {
1380b2261026SJoonsoo Kim 	struct page *page;
1381b2261026SJoonsoo Kim 	int nr_nodes, node;
1382b2261026SJoonsoo Kim 	int ret = 0;
1383b2261026SJoonsoo Kim 
1384b2261026SJoonsoo Kim 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1385b2261026SJoonsoo Kim 		page = alloc_fresh_huge_page_node(h, node);
1386b2261026SJoonsoo Kim 		if (page) {
1387b2261026SJoonsoo Kim 			ret = 1;
1388b2261026SJoonsoo Kim 			break;
1389b2261026SJoonsoo Kim 		}
1390b2261026SJoonsoo Kim 	}
1391b2261026SJoonsoo Kim 
1392b2261026SJoonsoo Kim 	if (ret)
1393b2261026SJoonsoo Kim 		count_vm_event(HTLB_BUDDY_PGALLOC);
1394b2261026SJoonsoo Kim 	else
1395b2261026SJoonsoo Kim 		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1396b2261026SJoonsoo Kim 
1397b2261026SJoonsoo Kim 	return ret;
1398b2261026SJoonsoo Kim }
1399b2261026SJoonsoo Kim 
1400e8c5c824SLee Schermerhorn /*
1401e8c5c824SLee Schermerhorn  * Free huge page from pool from next node to free.
1402e8c5c824SLee Schermerhorn  * Attempt to keep persistent huge pages more or less
1403e8c5c824SLee Schermerhorn  * balanced over allowed nodes.
1404e8c5c824SLee Schermerhorn  * Called with hugetlb_lock locked.
1405e8c5c824SLee Schermerhorn  */
14066ae11b27SLee Schermerhorn static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
14076ae11b27SLee Schermerhorn 							 bool acct_surplus)
1408e8c5c824SLee Schermerhorn {
1409b2261026SJoonsoo Kim 	int nr_nodes, node;
1410e8c5c824SLee Schermerhorn 	int ret = 0;
1411e8c5c824SLee Schermerhorn 
1412b2261026SJoonsoo Kim 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1413685f3457SLee Schermerhorn 		/*
1414685f3457SLee Schermerhorn 		 * If we're returning unused surplus pages, only examine
1415685f3457SLee Schermerhorn 		 * nodes with surplus pages.
1416685f3457SLee Schermerhorn 		 */
1417b2261026SJoonsoo Kim 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1418b2261026SJoonsoo Kim 		    !list_empty(&h->hugepage_freelists[node])) {
1419e8c5c824SLee Schermerhorn 			struct page *page =
1420b2261026SJoonsoo Kim 				list_entry(h->hugepage_freelists[node].next,
1421e8c5c824SLee Schermerhorn 					  struct page, lru);
1422e8c5c824SLee Schermerhorn 			list_del(&page->lru);
1423e8c5c824SLee Schermerhorn 			h->free_huge_pages--;
1424b2261026SJoonsoo Kim 			h->free_huge_pages_node[node]--;
1425685f3457SLee Schermerhorn 			if (acct_surplus) {
1426685f3457SLee Schermerhorn 				h->surplus_huge_pages--;
1427b2261026SJoonsoo Kim 				h->surplus_huge_pages_node[node]--;
1428685f3457SLee Schermerhorn 			}
1429e8c5c824SLee Schermerhorn 			update_and_free_page(h, page);
1430e8c5c824SLee Schermerhorn 			ret = 1;
14319a76db09SLee Schermerhorn 			break;
1432e8c5c824SLee Schermerhorn 		}
1433b2261026SJoonsoo Kim 	}
1434e8c5c824SLee Schermerhorn 
1435e8c5c824SLee Schermerhorn 	return ret;
1436e8c5c824SLee Schermerhorn }
1437e8c5c824SLee Schermerhorn 
1438c8721bbbSNaoya Horiguchi /*
1439c8721bbbSNaoya Horiguchi  * Dissolve a given free hugepage into free buddy pages. This function does
1440082d5b6bSGerald Schaefer  * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
1441082d5b6bSGerald Schaefer  * number of free hugepages would be reduced below the number of reserved
1442082d5b6bSGerald Schaefer  * hugepages.
1443c8721bbbSNaoya Horiguchi  */
1444082d5b6bSGerald Schaefer static int dissolve_free_huge_page(struct page *page)
1445c8721bbbSNaoya Horiguchi {
1446082d5b6bSGerald Schaefer 	int rc = 0;
1447082d5b6bSGerald Schaefer 
1448c8721bbbSNaoya Horiguchi 	spin_lock(&hugetlb_lock);
1449c8721bbbSNaoya Horiguchi 	if (PageHuge(page) && !page_count(page)) {
14502247bb33SGerald Schaefer 		struct page *head = compound_head(page);
14512247bb33SGerald Schaefer 		struct hstate *h = page_hstate(head);
14522247bb33SGerald Schaefer 		int nid = page_to_nid(head);
1453082d5b6bSGerald Schaefer 		if (h->free_huge_pages - h->resv_huge_pages == 0) {
1454082d5b6bSGerald Schaefer 			rc = -EBUSY;
1455082d5b6bSGerald Schaefer 			goto out;
1456082d5b6bSGerald Schaefer 		}
14572247bb33SGerald Schaefer 		list_del(&head->lru);
1458c8721bbbSNaoya Horiguchi 		h->free_huge_pages--;
1459c8721bbbSNaoya Horiguchi 		h->free_huge_pages_node[nid]--;
1460c1470b33Szhong jiang 		h->max_huge_pages--;
14612247bb33SGerald Schaefer 		update_and_free_page(h, head);
1462c8721bbbSNaoya Horiguchi 	}
1463082d5b6bSGerald Schaefer out:
1464c8721bbbSNaoya Horiguchi 	spin_unlock(&hugetlb_lock);
1465082d5b6bSGerald Schaefer 	return rc;
1466c8721bbbSNaoya Horiguchi }
1467c8721bbbSNaoya Horiguchi 
1468c8721bbbSNaoya Horiguchi /*
1469c8721bbbSNaoya Horiguchi  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1470c8721bbbSNaoya Horiguchi  * make specified memory blocks removable from the system.
14712247bb33SGerald Schaefer  * Note that this will dissolve a free gigantic hugepage completely, if any
14722247bb33SGerald Schaefer  * part of it lies within the given range.
1473082d5b6bSGerald Schaefer  * Also note that if dissolve_free_huge_page() returns with an error, all
1474082d5b6bSGerald Schaefer  * free hugepages that were dissolved before that error are lost.
1475c8721bbbSNaoya Horiguchi  */
1476082d5b6bSGerald Schaefer int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1477c8721bbbSNaoya Horiguchi {
1478c8721bbbSNaoya Horiguchi 	unsigned long pfn;
1479eb03aa00SGerald Schaefer 	struct page *page;
1480082d5b6bSGerald Schaefer 	int rc = 0;
1481c8721bbbSNaoya Horiguchi 
1482d0177639SLi Zhong 	if (!hugepages_supported())
1483082d5b6bSGerald Schaefer 		return rc;
1484d0177639SLi Zhong 
1485eb03aa00SGerald Schaefer 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1486eb03aa00SGerald Schaefer 		page = pfn_to_page(pfn);
1487eb03aa00SGerald Schaefer 		if (PageHuge(page) && !page_count(page)) {
1488eb03aa00SGerald Schaefer 			rc = dissolve_free_huge_page(page);
1489eb03aa00SGerald Schaefer 			if (rc)
1490082d5b6bSGerald Schaefer 				break;
1491eb03aa00SGerald Schaefer 		}
1492eb03aa00SGerald Schaefer 	}
1493082d5b6bSGerald Schaefer 
1494082d5b6bSGerald Schaefer 	return rc;
1495c8721bbbSNaoya Horiguchi }
1496c8721bbbSNaoya Horiguchi 
1497099730d6SDave Hansen /*
1498099730d6SDave Hansen  * There are 3 ways this can get called:
1499099730d6SDave Hansen  * 1. With vma+addr: we use the VMA's memory policy
1500099730d6SDave Hansen  * 2. With !vma, but nid=NUMA_NO_NODE:  We try to allocate a huge
1501099730d6SDave Hansen  *    page from any node, and let the buddy allocator itself figure
1502099730d6SDave Hansen  *    it out.
1503099730d6SDave Hansen  * 3. With !vma, but nid!=NUMA_NO_NODE.  We allocate a huge page
1504099730d6SDave Hansen  *    strictly from 'nid'
1505099730d6SDave Hansen  */
1506099730d6SDave Hansen static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1507099730d6SDave Hansen 		struct vm_area_struct *vma, unsigned long addr, int nid)
1508099730d6SDave Hansen {
1509099730d6SDave Hansen 	int order = huge_page_order(h);
1510099730d6SDave Hansen 	gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
1511099730d6SDave Hansen 	unsigned int cpuset_mems_cookie;
1512099730d6SDave Hansen 
1513099730d6SDave Hansen 	/*
1514099730d6SDave Hansen 	 * We need a VMA to get a memory policy.  If we do not
1515e0ec90eeSDave Hansen 	 * have one, we use the 'nid' argument.
1516e0ec90eeSDave Hansen 	 *
1517e0ec90eeSDave Hansen 	 * The mempolicy stuff below has some non-inlined bits
1518e0ec90eeSDave Hansen 	 * and calls ->vm_ops.  That makes it hard to optimize at
1519e0ec90eeSDave Hansen 	 * compile-time, even when NUMA is off and it does
1520e0ec90eeSDave Hansen 	 * nothing.  This helps the compiler optimize it out.
1521099730d6SDave Hansen 	 */
1522e0ec90eeSDave Hansen 	if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
1523099730d6SDave Hansen 		/*
1524099730d6SDave Hansen 		 * If a specific node is requested, make sure to
1525099730d6SDave Hansen 		 * get memory from there, but only when a node
1526099730d6SDave Hansen 		 * is explicitly specified.
1527099730d6SDave Hansen 		 */
1528099730d6SDave Hansen 		if (nid != NUMA_NO_NODE)
1529099730d6SDave Hansen 			gfp |= __GFP_THISNODE;
1530099730d6SDave Hansen 		/*
1531099730d6SDave Hansen 		 * Make sure to call something that can handle
1532099730d6SDave Hansen 		 * nid=NUMA_NO_NODE
1533099730d6SDave Hansen 		 */
1534099730d6SDave Hansen 		return alloc_pages_node(nid, gfp, order);
1535099730d6SDave Hansen 	}
1536099730d6SDave Hansen 
1537099730d6SDave Hansen 	/*
1538099730d6SDave Hansen 	 * OK, so we have a VMA.  Fetch the mempolicy and try to
1539e0ec90eeSDave Hansen 	 * allocate a huge page with it.  We will only reach this
1540e0ec90eeSDave Hansen 	 * when CONFIG_NUMA=y.
1541099730d6SDave Hansen 	 */
1542099730d6SDave Hansen 	do {
1543099730d6SDave Hansen 		struct page *page;
1544099730d6SDave Hansen 		struct mempolicy *mpol;
1545099730d6SDave Hansen 		struct zonelist *zl;
1546099730d6SDave Hansen 		nodemask_t *nodemask;
1547099730d6SDave Hansen 
1548099730d6SDave Hansen 		cpuset_mems_cookie = read_mems_allowed_begin();
1549099730d6SDave Hansen 		zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
1550099730d6SDave Hansen 		mpol_cond_put(mpol);
1551099730d6SDave Hansen 		page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
1552099730d6SDave Hansen 		if (page)
1553099730d6SDave Hansen 			return page;
1554099730d6SDave Hansen 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
1555099730d6SDave Hansen 
1556099730d6SDave Hansen 	return NULL;
1557099730d6SDave Hansen }
1558099730d6SDave Hansen 
1559099730d6SDave Hansen /*
1560099730d6SDave Hansen  * There are two ways to allocate a huge page:
1561099730d6SDave Hansen  * 1. When you have a VMA and an address (like a fault)
1562099730d6SDave Hansen  * 2. When you have no VMA (like when setting /proc/.../nr_hugepages)
1563099730d6SDave Hansen  *
1564099730d6SDave Hansen  * 'vma' and 'addr' are only for (1).  'nid' is always NUMA_NO_NODE in
1565099730d6SDave Hansen  * this case which signifies that the allocation should be done with
1566099730d6SDave Hansen  * respect for the VMA's memory policy.
1567099730d6SDave Hansen  *
1568099730d6SDave Hansen  * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively. This
1569099730d6SDave Hansen  * implies that memory policies will not be taken in to account.
1570099730d6SDave Hansen  */
1571099730d6SDave Hansen static struct page *__alloc_buddy_huge_page(struct hstate *h,
1572099730d6SDave Hansen 		struct vm_area_struct *vma, unsigned long addr, int nid)
15737893d1d5SAdam Litke {
15747893d1d5SAdam Litke 	struct page *page;
1575bf50bab2SNaoya Horiguchi 	unsigned int r_nid;
15767893d1d5SAdam Litke 
1577bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
1578aa888a74SAndi Kleen 		return NULL;
1579aa888a74SAndi Kleen 
1580d1c3fb1fSNishanth Aravamudan 	/*
1581099730d6SDave Hansen 	 * Make sure that anyone specifying 'nid' is not also specifying a VMA.
1582099730d6SDave Hansen 	 * This makes sure the caller is picking _one_ of the modes with which
1583099730d6SDave Hansen 	 * we can call this function, not both.
1584099730d6SDave Hansen 	 */
1585099730d6SDave Hansen 	if (vma || (addr != -1)) {
1586e0ec90eeSDave Hansen 		VM_WARN_ON_ONCE(addr == -1);
1587e0ec90eeSDave Hansen 		VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
1588099730d6SDave Hansen 	}
1589099730d6SDave Hansen 	/*
1590d1c3fb1fSNishanth Aravamudan 	 * Assume we will successfully allocate the surplus page to
1591d1c3fb1fSNishanth Aravamudan 	 * prevent racing processes from causing the surplus to exceed
1592d1c3fb1fSNishanth Aravamudan 	 * overcommit
1593d1c3fb1fSNishanth Aravamudan 	 *
1594d1c3fb1fSNishanth Aravamudan 	 * This however introduces a different race, where a process B
1595d1c3fb1fSNishanth Aravamudan 	 * tries to grow the static hugepage pool while alloc_pages() is
1596d1c3fb1fSNishanth Aravamudan 	 * called by process A. B will only examine the per-node
1597d1c3fb1fSNishanth Aravamudan 	 * counters in determining if surplus huge pages can be
1598d1c3fb1fSNishanth Aravamudan 	 * converted to normal huge pages in adjust_pool_surplus(). A
1599d1c3fb1fSNishanth Aravamudan 	 * won't be able to increment the per-node counter, until the
1600d1c3fb1fSNishanth Aravamudan 	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
1601d1c3fb1fSNishanth Aravamudan 	 * no more huge pages can be converted from surplus to normal
1602d1c3fb1fSNishanth Aravamudan 	 * state (and doesn't try to convert again). Thus, we have a
1603d1c3fb1fSNishanth Aravamudan 	 * case where a surplus huge page exists, the pool is grown, and
1604d1c3fb1fSNishanth Aravamudan 	 * the surplus huge page still exists after, even though it
1605d1c3fb1fSNishanth Aravamudan 	 * should just have been converted to a normal huge page. This
1606d1c3fb1fSNishanth Aravamudan 	 * does not leak memory, though, as the hugepage will be freed
1607d1c3fb1fSNishanth Aravamudan 	 * once it is out of use. It also does not allow the counters to
1608d1c3fb1fSNishanth Aravamudan 	 * go out of whack in adjust_pool_surplus() as we don't modify
1609d1c3fb1fSNishanth Aravamudan 	 * the node values until we've gotten the hugepage and only the
1610d1c3fb1fSNishanth Aravamudan 	 * per-node value is checked there.
1611d1c3fb1fSNishanth Aravamudan 	 */
1612d1c3fb1fSNishanth Aravamudan 	spin_lock(&hugetlb_lock);
1613a5516438SAndi Kleen 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1614d1c3fb1fSNishanth Aravamudan 		spin_unlock(&hugetlb_lock);
1615d1c3fb1fSNishanth Aravamudan 		return NULL;
1616d1c3fb1fSNishanth Aravamudan 	} else {
1617a5516438SAndi Kleen 		h->nr_huge_pages++;
1618a5516438SAndi Kleen 		h->surplus_huge_pages++;
1619d1c3fb1fSNishanth Aravamudan 	}
1620d1c3fb1fSNishanth Aravamudan 	spin_unlock(&hugetlb_lock);
1621d1c3fb1fSNishanth Aravamudan 
1622099730d6SDave Hansen 	page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);
1623d1c3fb1fSNishanth Aravamudan 
16247893d1d5SAdam Litke 	spin_lock(&hugetlb_lock);
1625d1c3fb1fSNishanth Aravamudan 	if (page) {
16260edaecfaSAneesh Kumar K.V 		INIT_LIST_HEAD(&page->lru);
1627bf50bab2SNaoya Horiguchi 		r_nid = page_to_nid(page);
1628f1e61557SKirill A. Shutemov 		set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
16299dd540e2SAneesh Kumar K.V 		set_hugetlb_cgroup(page, NULL);
1630d1c3fb1fSNishanth Aravamudan 		/*
1631d1c3fb1fSNishanth Aravamudan 		 * We incremented the global counters already
1632d1c3fb1fSNishanth Aravamudan 		 */
1633bf50bab2SNaoya Horiguchi 		h->nr_huge_pages_node[r_nid]++;
1634bf50bab2SNaoya Horiguchi 		h->surplus_huge_pages_node[r_nid]++;
16353b116300SAdam Litke 		__count_vm_event(HTLB_BUDDY_PGALLOC);
1636d1c3fb1fSNishanth Aravamudan 	} else {
1637a5516438SAndi Kleen 		h->nr_huge_pages--;
1638a5516438SAndi Kleen 		h->surplus_huge_pages--;
16393b116300SAdam Litke 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
16407893d1d5SAdam Litke 	}
1641d1c3fb1fSNishanth Aravamudan 	spin_unlock(&hugetlb_lock);
16427893d1d5SAdam Litke 
16437893d1d5SAdam Litke 	return page;
16447893d1d5SAdam Litke }
16457893d1d5SAdam Litke 
1646e4e574b7SAdam Litke /*
1647099730d6SDave Hansen  * Allocate a huge page from 'nid'.  Note, 'nid' may be
1648099730d6SDave Hansen  * NUMA_NO_NODE, which means that it may be allocated
1649099730d6SDave Hansen  * anywhere.
1650099730d6SDave Hansen  */
1651e0ec90eeSDave Hansen static
1652099730d6SDave Hansen struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
1653099730d6SDave Hansen {
1654099730d6SDave Hansen 	unsigned long addr = -1;
1655099730d6SDave Hansen 
1656099730d6SDave Hansen 	return __alloc_buddy_huge_page(h, NULL, addr, nid);
1657099730d6SDave Hansen }
1658099730d6SDave Hansen 
1659099730d6SDave Hansen /*
1660099730d6SDave Hansen  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1661099730d6SDave Hansen  */
1662e0ec90eeSDave Hansen static
1663099730d6SDave Hansen struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1664099730d6SDave Hansen 		struct vm_area_struct *vma, unsigned long addr)
1665099730d6SDave Hansen {
1666099730d6SDave Hansen 	return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
1667099730d6SDave Hansen }
1668099730d6SDave Hansen 
1669099730d6SDave Hansen /*
1670bf50bab2SNaoya Horiguchi  * This allocation function is useful in the context where vma is irrelevant.
1671bf50bab2SNaoya Horiguchi  * E.g. soft-offlining uses this function because it only cares physical
1672bf50bab2SNaoya Horiguchi  * address of error page.
1673bf50bab2SNaoya Horiguchi  */
1674bf50bab2SNaoya Horiguchi struct page *alloc_huge_page_node(struct hstate *h, int nid)
1675bf50bab2SNaoya Horiguchi {
16764ef91848SJoonsoo Kim 	struct page *page = NULL;
1677bf50bab2SNaoya Horiguchi 
1678bf50bab2SNaoya Horiguchi 	spin_lock(&hugetlb_lock);
16794ef91848SJoonsoo Kim 	if (h->free_huge_pages - h->resv_huge_pages > 0)
1680bf50bab2SNaoya Horiguchi 		page = dequeue_huge_page_node(h, nid);
1681bf50bab2SNaoya Horiguchi 	spin_unlock(&hugetlb_lock);
1682bf50bab2SNaoya Horiguchi 
168394ae8ba7SAneesh Kumar K.V 	if (!page)
1684099730d6SDave Hansen 		page = __alloc_buddy_huge_page_no_mpol(h, nid);
1685bf50bab2SNaoya Horiguchi 
1686bf50bab2SNaoya Horiguchi 	return page;
1687bf50bab2SNaoya Horiguchi }
1688bf50bab2SNaoya Horiguchi 
1689bf50bab2SNaoya Horiguchi /*
169025985edcSLucas De Marchi  * Increase the hugetlb pool such that it can accommodate a reservation
1691e4e574b7SAdam Litke  * of size 'delta'.
1692e4e574b7SAdam Litke  */
1693a5516438SAndi Kleen static int gather_surplus_pages(struct hstate *h, int delta)
1694e4e574b7SAdam Litke {
1695e4e574b7SAdam Litke 	struct list_head surplus_list;
1696e4e574b7SAdam Litke 	struct page *page, *tmp;
1697e4e574b7SAdam Litke 	int ret, i;
1698e4e574b7SAdam Litke 	int needed, allocated;
169928073b02SHillf Danton 	bool alloc_ok = true;
1700e4e574b7SAdam Litke 
1701a5516438SAndi Kleen 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1702ac09b3a1SAdam Litke 	if (needed <= 0) {
1703a5516438SAndi Kleen 		h->resv_huge_pages += delta;
1704e4e574b7SAdam Litke 		return 0;
1705ac09b3a1SAdam Litke 	}
1706e4e574b7SAdam Litke 
1707e4e574b7SAdam Litke 	allocated = 0;
1708e4e574b7SAdam Litke 	INIT_LIST_HEAD(&surplus_list);
1709e4e574b7SAdam Litke 
1710e4e574b7SAdam Litke 	ret = -ENOMEM;
1711e4e574b7SAdam Litke retry:
1712e4e574b7SAdam Litke 	spin_unlock(&hugetlb_lock);
1713e4e574b7SAdam Litke 	for (i = 0; i < needed; i++) {
1714099730d6SDave Hansen 		page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
171528073b02SHillf Danton 		if (!page) {
171628073b02SHillf Danton 			alloc_ok = false;
171728073b02SHillf Danton 			break;
171828073b02SHillf Danton 		}
1719e4e574b7SAdam Litke 		list_add(&page->lru, &surplus_list);
1720e4e574b7SAdam Litke 	}
172128073b02SHillf Danton 	allocated += i;
1722e4e574b7SAdam Litke 
1723e4e574b7SAdam Litke 	/*
1724e4e574b7SAdam Litke 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
1725e4e574b7SAdam Litke 	 * because either resv_huge_pages or free_huge_pages may have changed.
1726e4e574b7SAdam Litke 	 */
1727e4e574b7SAdam Litke 	spin_lock(&hugetlb_lock);
1728a5516438SAndi Kleen 	needed = (h->resv_huge_pages + delta) -
1729a5516438SAndi Kleen 			(h->free_huge_pages + allocated);
173028073b02SHillf Danton 	if (needed > 0) {
173128073b02SHillf Danton 		if (alloc_ok)
1732e4e574b7SAdam Litke 			goto retry;
173328073b02SHillf Danton 		/*
173428073b02SHillf Danton 		 * We were not able to allocate enough pages to
173528073b02SHillf Danton 		 * satisfy the entire reservation so we free what
173628073b02SHillf Danton 		 * we've allocated so far.
173728073b02SHillf Danton 		 */
173828073b02SHillf Danton 		goto free;
173928073b02SHillf Danton 	}
1740e4e574b7SAdam Litke 	/*
1741e4e574b7SAdam Litke 	 * The surplus_list now contains _at_least_ the number of extra pages
174225985edcSLucas De Marchi 	 * needed to accommodate the reservation.  Add the appropriate number
1743e4e574b7SAdam Litke 	 * of pages to the hugetlb pool and free the extras back to the buddy
1744ac09b3a1SAdam Litke 	 * allocator.  Commit the entire reservation here to prevent another
1745ac09b3a1SAdam Litke 	 * process from stealing the pages as they are added to the pool but
1746ac09b3a1SAdam Litke 	 * before they are reserved.
1747e4e574b7SAdam Litke 	 */
1748e4e574b7SAdam Litke 	needed += allocated;
1749a5516438SAndi Kleen 	h->resv_huge_pages += delta;
1750e4e574b7SAdam Litke 	ret = 0;
1751a9869b83SNaoya Horiguchi 
175219fc3f0aSAdam Litke 	/* Free the needed pages to the hugetlb pool */
175319fc3f0aSAdam Litke 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
175419fc3f0aSAdam Litke 		if ((--needed) < 0)
175519fc3f0aSAdam Litke 			break;
1756a9869b83SNaoya Horiguchi 		/*
1757a9869b83SNaoya Horiguchi 		 * This page is now managed by the hugetlb allocator and has
1758a9869b83SNaoya Horiguchi 		 * no users -- drop the buddy allocator's reference.
1759a9869b83SNaoya Horiguchi 		 */
1760a9869b83SNaoya Horiguchi 		put_page_testzero(page);
1761309381feSSasha Levin 		VM_BUG_ON_PAGE(page_count(page), page);
1762a5516438SAndi Kleen 		enqueue_huge_page(h, page);
176319fc3f0aSAdam Litke 	}
176428073b02SHillf Danton free:
1765b0365c8dSHillf Danton 	spin_unlock(&hugetlb_lock);
176619fc3f0aSAdam Litke 
176719fc3f0aSAdam Litke 	/* Free unnecessary surplus pages to the buddy allocator */
1768c0d934baSJoonsoo Kim 	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1769a9869b83SNaoya Horiguchi 		put_page(page);
177019fc3f0aSAdam Litke 	spin_lock(&hugetlb_lock);
1771e4e574b7SAdam Litke 
1772e4e574b7SAdam Litke 	return ret;
1773e4e574b7SAdam Litke }
1774e4e574b7SAdam Litke 
1775e4e574b7SAdam Litke /*
1776e5bbc8a6SMike Kravetz  * This routine has two main purposes:
1777e5bbc8a6SMike Kravetz  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1778e5bbc8a6SMike Kravetz  *    in unused_resv_pages.  This corresponds to the prior adjustments made
1779e5bbc8a6SMike Kravetz  *    to the associated reservation map.
1780e5bbc8a6SMike Kravetz  * 2) Free any unused surplus pages that may have been allocated to satisfy
1781e5bbc8a6SMike Kravetz  *    the reservation.  As many as unused_resv_pages may be freed.
1782e5bbc8a6SMike Kravetz  *
1783e5bbc8a6SMike Kravetz  * Called with hugetlb_lock held.  However, the lock could be dropped (and
1784e5bbc8a6SMike Kravetz  * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
1785e5bbc8a6SMike Kravetz  * we must make sure nobody else can claim pages we are in the process of
1786e5bbc8a6SMike Kravetz  * freeing.  Do this by ensuring resv_huge_page always is greater than the
1787e5bbc8a6SMike Kravetz  * number of huge pages we plan to free when dropping the lock.
1788e4e574b7SAdam Litke  */
1789a5516438SAndi Kleen static void return_unused_surplus_pages(struct hstate *h,
1790a5516438SAndi Kleen 					unsigned long unused_resv_pages)
1791e4e574b7SAdam Litke {
1792e4e574b7SAdam Litke 	unsigned long nr_pages;
1793e4e574b7SAdam Litke 
1794aa888a74SAndi Kleen 	/* Cannot return gigantic pages currently */
1795bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
1796e5bbc8a6SMike Kravetz 		goto out;
1797aa888a74SAndi Kleen 
1798e5bbc8a6SMike Kravetz 	/*
1799e5bbc8a6SMike Kravetz 	 * Part (or even all) of the reservation could have been backed
1800e5bbc8a6SMike Kravetz 	 * by pre-allocated pages. Only free surplus pages.
1801e5bbc8a6SMike Kravetz 	 */
1802a5516438SAndi Kleen 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1803e4e574b7SAdam Litke 
1804685f3457SLee Schermerhorn 	/*
1805685f3457SLee Schermerhorn 	 * We want to release as many surplus pages as possible, spread
18069b5e5d0fSLee Schermerhorn 	 * evenly across all nodes with memory. Iterate across these nodes
18079b5e5d0fSLee Schermerhorn 	 * until we can no longer free unreserved surplus pages. This occurs
18089b5e5d0fSLee Schermerhorn 	 * when the nodes with surplus pages have no free pages.
18099b5e5d0fSLee Schermerhorn 	 * free_pool_huge_page() will balance the the freed pages across the
18109b5e5d0fSLee Schermerhorn 	 * on-line nodes with memory and will handle the hstate accounting.
1811e5bbc8a6SMike Kravetz 	 *
1812e5bbc8a6SMike Kravetz 	 * Note that we decrement resv_huge_pages as we free the pages.  If
1813e5bbc8a6SMike Kravetz 	 * we drop the lock, resv_huge_pages will still be sufficiently large
1814e5bbc8a6SMike Kravetz 	 * to cover subsequent pages we may free.
1815685f3457SLee Schermerhorn 	 */
1816685f3457SLee Schermerhorn 	while (nr_pages--) {
1817e5bbc8a6SMike Kravetz 		h->resv_huge_pages--;
1818e5bbc8a6SMike Kravetz 		unused_resv_pages--;
18198cebfcd0SLai Jiangshan 		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1820e5bbc8a6SMike Kravetz 			goto out;
18217848a4bfSMizuma, Masayoshi 		cond_resched_lock(&hugetlb_lock);
1822e4e574b7SAdam Litke 	}
1823e5bbc8a6SMike Kravetz 
1824e5bbc8a6SMike Kravetz out:
1825e5bbc8a6SMike Kravetz 	/* Fully uncommit the reservation */
1826e5bbc8a6SMike Kravetz 	h->resv_huge_pages -= unused_resv_pages;
1827e4e574b7SAdam Litke }
1828e4e574b7SAdam Litke 
18295e911373SMike Kravetz 
1830c37f9fb1SAndy Whitcroft /*
1831feba16e2SMike Kravetz  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
18325e911373SMike Kravetz  * are used by the huge page allocation routines to manage reservations.
1833cf3ad20bSMike Kravetz  *
1834cf3ad20bSMike Kravetz  * vma_needs_reservation is called to determine if the huge page at addr
1835cf3ad20bSMike Kravetz  * within the vma has an associated reservation.  If a reservation is
1836cf3ad20bSMike Kravetz  * needed, the value 1 is returned.  The caller is then responsible for
1837cf3ad20bSMike Kravetz  * managing the global reservation and subpool usage counts.  After
1838cf3ad20bSMike Kravetz  * the huge page has been allocated, vma_commit_reservation is called
1839feba16e2SMike Kravetz  * to add the page to the reservation map.  If the page allocation fails,
1840feba16e2SMike Kravetz  * the reservation must be ended instead of committed.  vma_end_reservation
1841feba16e2SMike Kravetz  * is called in such cases.
1842cf3ad20bSMike Kravetz  *
1843cf3ad20bSMike Kravetz  * In the normal case, vma_commit_reservation returns the same value
1844cf3ad20bSMike Kravetz  * as the preceding vma_needs_reservation call.  The only time this
1845cf3ad20bSMike Kravetz  * is not the case is if a reserve map was changed between calls.  It
1846cf3ad20bSMike Kravetz  * is the responsibility of the caller to notice the difference and
1847cf3ad20bSMike Kravetz  * take appropriate action.
184896b96a96SMike Kravetz  *
184996b96a96SMike Kravetz  * vma_add_reservation is used in error paths where a reservation must
185096b96a96SMike Kravetz  * be restored when a newly allocated huge page must be freed.  It is
185196b96a96SMike Kravetz  * to be called after calling vma_needs_reservation to determine if a
185296b96a96SMike Kravetz  * reservation exists.
1853c37f9fb1SAndy Whitcroft  */
18545e911373SMike Kravetz enum vma_resv_mode {
18555e911373SMike Kravetz 	VMA_NEEDS_RESV,
18565e911373SMike Kravetz 	VMA_COMMIT_RESV,
1857feba16e2SMike Kravetz 	VMA_END_RESV,
185896b96a96SMike Kravetz 	VMA_ADD_RESV,
18595e911373SMike Kravetz };
1860cf3ad20bSMike Kravetz static long __vma_reservation_common(struct hstate *h,
1861cf3ad20bSMike Kravetz 				struct vm_area_struct *vma, unsigned long addr,
18625e911373SMike Kravetz 				enum vma_resv_mode mode)
1863c37f9fb1SAndy Whitcroft {
18644e35f483SJoonsoo Kim 	struct resv_map *resv;
18654e35f483SJoonsoo Kim 	pgoff_t idx;
1866cf3ad20bSMike Kravetz 	long ret;
1867c37f9fb1SAndy Whitcroft 
18684e35f483SJoonsoo Kim 	resv = vma_resv_map(vma);
18694e35f483SJoonsoo Kim 	if (!resv)
1870c37f9fb1SAndy Whitcroft 		return 1;
1871c37f9fb1SAndy Whitcroft 
18724e35f483SJoonsoo Kim 	idx = vma_hugecache_offset(h, vma, addr);
18735e911373SMike Kravetz 	switch (mode) {
18745e911373SMike Kravetz 	case VMA_NEEDS_RESV:
1875cf3ad20bSMike Kravetz 		ret = region_chg(resv, idx, idx + 1);
18765e911373SMike Kravetz 		break;
18775e911373SMike Kravetz 	case VMA_COMMIT_RESV:
18785e911373SMike Kravetz 		ret = region_add(resv, idx, idx + 1);
18795e911373SMike Kravetz 		break;
1880feba16e2SMike Kravetz 	case VMA_END_RESV:
18815e911373SMike Kravetz 		region_abort(resv, idx, idx + 1);
18825e911373SMike Kravetz 		ret = 0;
18835e911373SMike Kravetz 		break;
188496b96a96SMike Kravetz 	case VMA_ADD_RESV:
188596b96a96SMike Kravetz 		if (vma->vm_flags & VM_MAYSHARE)
188696b96a96SMike Kravetz 			ret = region_add(resv, idx, idx + 1);
188796b96a96SMike Kravetz 		else {
188896b96a96SMike Kravetz 			region_abort(resv, idx, idx + 1);
188996b96a96SMike Kravetz 			ret = region_del(resv, idx, idx + 1);
189096b96a96SMike Kravetz 		}
189196b96a96SMike Kravetz 		break;
18925e911373SMike Kravetz 	default:
18935e911373SMike Kravetz 		BUG();
18945e911373SMike Kravetz 	}
189584afd99bSAndy Whitcroft 
18964e35f483SJoonsoo Kim 	if (vma->vm_flags & VM_MAYSHARE)
1897cf3ad20bSMike Kravetz 		return ret;
189867961f9dSMike Kravetz 	else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
189967961f9dSMike Kravetz 		/*
190067961f9dSMike Kravetz 		 * In most cases, reserves always exist for private mappings.
190167961f9dSMike Kravetz 		 * However, a file associated with mapping could have been
190267961f9dSMike Kravetz 		 * hole punched or truncated after reserves were consumed.
190367961f9dSMike Kravetz 		 * As subsequent fault on such a range will not use reserves.
190467961f9dSMike Kravetz 		 * Subtle - The reserve map for private mappings has the
190567961f9dSMike Kravetz 		 * opposite meaning than that of shared mappings.  If NO
190667961f9dSMike Kravetz 		 * entry is in the reserve map, it means a reservation exists.
190767961f9dSMike Kravetz 		 * If an entry exists in the reserve map, it means the
190867961f9dSMike Kravetz 		 * reservation has already been consumed.  As a result, the
190967961f9dSMike Kravetz 		 * return value of this routine is the opposite of the
191067961f9dSMike Kravetz 		 * value returned from reserve map manipulation routines above.
191167961f9dSMike Kravetz 		 */
191267961f9dSMike Kravetz 		if (ret)
191367961f9dSMike Kravetz 			return 0;
191467961f9dSMike Kravetz 		else
191567961f9dSMike Kravetz 			return 1;
191667961f9dSMike Kravetz 	}
19174e35f483SJoonsoo Kim 	else
1918cf3ad20bSMike Kravetz 		return ret < 0 ? ret : 0;
191984afd99bSAndy Whitcroft }
1920cf3ad20bSMike Kravetz 
1921cf3ad20bSMike Kravetz static long vma_needs_reservation(struct hstate *h,
1922a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long addr)
1923c37f9fb1SAndy Whitcroft {
19245e911373SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1925cf3ad20bSMike Kravetz }
1926c37f9fb1SAndy Whitcroft 
1927cf3ad20bSMike Kravetz static long vma_commit_reservation(struct hstate *h,
1928cf3ad20bSMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
1929cf3ad20bSMike Kravetz {
19305e911373SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
19315e911373SMike Kravetz }
19325e911373SMike Kravetz 
1933feba16e2SMike Kravetz static void vma_end_reservation(struct hstate *h,
19345e911373SMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
19355e911373SMike Kravetz {
1936feba16e2SMike Kravetz 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1937c37f9fb1SAndy Whitcroft }
1938c37f9fb1SAndy Whitcroft 
193996b96a96SMike Kravetz static long vma_add_reservation(struct hstate *h,
194096b96a96SMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
194196b96a96SMike Kravetz {
194296b96a96SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
194396b96a96SMike Kravetz }
194496b96a96SMike Kravetz 
194596b96a96SMike Kravetz /*
194696b96a96SMike Kravetz  * This routine is called to restore a reservation on error paths.  In the
194796b96a96SMike Kravetz  * specific error paths, a huge page was allocated (via alloc_huge_page)
194896b96a96SMike Kravetz  * and is about to be freed.  If a reservation for the page existed,
194996b96a96SMike Kravetz  * alloc_huge_page would have consumed the reservation and set PagePrivate
195096b96a96SMike Kravetz  * in the newly allocated page.  When the page is freed via free_huge_page,
195196b96a96SMike Kravetz  * the global reservation count will be incremented if PagePrivate is set.
195296b96a96SMike Kravetz  * However, free_huge_page can not adjust the reserve map.  Adjust the
195396b96a96SMike Kravetz  * reserve map here to be consistent with global reserve count adjustments
195496b96a96SMike Kravetz  * to be made by free_huge_page.
195596b96a96SMike Kravetz  */
195696b96a96SMike Kravetz static void restore_reserve_on_error(struct hstate *h,
195796b96a96SMike Kravetz 			struct vm_area_struct *vma, unsigned long address,
195896b96a96SMike Kravetz 			struct page *page)
195996b96a96SMike Kravetz {
196096b96a96SMike Kravetz 	if (unlikely(PagePrivate(page))) {
196196b96a96SMike Kravetz 		long rc = vma_needs_reservation(h, vma, address);
196296b96a96SMike Kravetz 
196396b96a96SMike Kravetz 		if (unlikely(rc < 0)) {
196496b96a96SMike Kravetz 			/*
196596b96a96SMike Kravetz 			 * Rare out of memory condition in reserve map
196696b96a96SMike Kravetz 			 * manipulation.  Clear PagePrivate so that
196796b96a96SMike Kravetz 			 * global reserve count will not be incremented
196896b96a96SMike Kravetz 			 * by free_huge_page.  This will make it appear
196996b96a96SMike Kravetz 			 * as though the reservation for this page was
197096b96a96SMike Kravetz 			 * consumed.  This may prevent the task from
197196b96a96SMike Kravetz 			 * faulting in the page at a later time.  This
197296b96a96SMike Kravetz 			 * is better than inconsistent global huge page
197396b96a96SMike Kravetz 			 * accounting of reserve counts.
197496b96a96SMike Kravetz 			 */
197596b96a96SMike Kravetz 			ClearPagePrivate(page);
197696b96a96SMike Kravetz 		} else if (rc) {
197796b96a96SMike Kravetz 			rc = vma_add_reservation(h, vma, address);
197896b96a96SMike Kravetz 			if (unlikely(rc < 0))
197996b96a96SMike Kravetz 				/*
198096b96a96SMike Kravetz 				 * See above comment about rare out of
198196b96a96SMike Kravetz 				 * memory condition.
198296b96a96SMike Kravetz 				 */
198396b96a96SMike Kravetz 				ClearPagePrivate(page);
198496b96a96SMike Kravetz 		} else
198596b96a96SMike Kravetz 			vma_end_reservation(h, vma, address);
198696b96a96SMike Kravetz 	}
198796b96a96SMike Kravetz }
198896b96a96SMike Kravetz 
198970c3547eSMike Kravetz struct page *alloc_huge_page(struct vm_area_struct *vma,
199004f2cbe3SMel Gorman 				    unsigned long addr, int avoid_reserve)
1991348ea204SAdam Litke {
199290481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_vma(vma);
1993a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
1994348ea204SAdam Litke 	struct page *page;
1995d85f69b0SMike Kravetz 	long map_chg, map_commit;
1996d85f69b0SMike Kravetz 	long gbl_chg;
19976d76dcf4SAneesh Kumar K.V 	int ret, idx;
19986d76dcf4SAneesh Kumar K.V 	struct hugetlb_cgroup *h_cg;
19992fc39cecSAdam Litke 
20006d76dcf4SAneesh Kumar K.V 	idx = hstate_index(h);
2001a1e78772SMel Gorman 	/*
2002d85f69b0SMike Kravetz 	 * Examine the region/reserve map to determine if the process
2003d85f69b0SMike Kravetz 	 * has a reservation for the page to be allocated.  A return
2004d85f69b0SMike Kravetz 	 * code of zero indicates a reservation exists (no change).
2005a1e78772SMel Gorman 	 */
2006d85f69b0SMike Kravetz 	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2007d85f69b0SMike Kravetz 	if (map_chg < 0)
200876dcee75SAneesh Kumar K.V 		return ERR_PTR(-ENOMEM);
2009d85f69b0SMike Kravetz 
2010d85f69b0SMike Kravetz 	/*
2011d85f69b0SMike Kravetz 	 * Processes that did not create the mapping will have no
2012d85f69b0SMike Kravetz 	 * reserves as indicated by the region/reserve map. Check
2013d85f69b0SMike Kravetz 	 * that the allocation will not exceed the subpool limit.
2014d85f69b0SMike Kravetz 	 * Allocations for MAP_NORESERVE mappings also need to be
2015d85f69b0SMike Kravetz 	 * checked against any subpool limit.
2016d85f69b0SMike Kravetz 	 */
2017d85f69b0SMike Kravetz 	if (map_chg || avoid_reserve) {
2018d85f69b0SMike Kravetz 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
2019d85f69b0SMike Kravetz 		if (gbl_chg < 0) {
2020feba16e2SMike Kravetz 			vma_end_reservation(h, vma, addr);
202176dcee75SAneesh Kumar K.V 			return ERR_PTR(-ENOSPC);
20225e911373SMike Kravetz 		}
202390d8b7e6SAdam Litke 
2024d85f69b0SMike Kravetz 		/*
2025d85f69b0SMike Kravetz 		 * Even though there was no reservation in the region/reserve
2026d85f69b0SMike Kravetz 		 * map, there could be reservations associated with the
2027d85f69b0SMike Kravetz 		 * subpool that can be used.  This would be indicated if the
2028d85f69b0SMike Kravetz 		 * return value of hugepage_subpool_get_pages() is zero.
2029d85f69b0SMike Kravetz 		 * However, if avoid_reserve is specified we still avoid even
2030d85f69b0SMike Kravetz 		 * the subpool reservations.
2031d85f69b0SMike Kravetz 		 */
2032d85f69b0SMike Kravetz 		if (avoid_reserve)
2033d85f69b0SMike Kravetz 			gbl_chg = 1;
2034d85f69b0SMike Kravetz 	}
2035d85f69b0SMike Kravetz 
20366d76dcf4SAneesh Kumar K.V 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
20378f34af6fSJianyu Zhan 	if (ret)
20388f34af6fSJianyu Zhan 		goto out_subpool_put;
20398f34af6fSJianyu Zhan 
2040a1e78772SMel Gorman 	spin_lock(&hugetlb_lock);
2041d85f69b0SMike Kravetz 	/*
2042d85f69b0SMike Kravetz 	 * glb_chg is passed to indicate whether or not a page must be taken
2043d85f69b0SMike Kravetz 	 * from the global free pool (global change).  gbl_chg == 0 indicates
2044d85f69b0SMike Kravetz 	 * a reservation exists for the allocation.
2045d85f69b0SMike Kravetz 	 */
2046d85f69b0SMike Kravetz 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
204781a6fcaeSJoonsoo Kim 	if (!page) {
204894ae8ba7SAneesh Kumar K.V 		spin_unlock(&hugetlb_lock);
2049099730d6SDave Hansen 		page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
20508f34af6fSJianyu Zhan 		if (!page)
20518f34af6fSJianyu Zhan 			goto out_uncharge_cgroup;
2052a88c7695SNaoya Horiguchi 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2053a88c7695SNaoya Horiguchi 			SetPagePrivate(page);
2054a88c7695SNaoya Horiguchi 			h->resv_huge_pages--;
2055a88c7695SNaoya Horiguchi 		}
205679dbb236SAneesh Kumar K.V 		spin_lock(&hugetlb_lock);
205779dbb236SAneesh Kumar K.V 		list_move(&page->lru, &h->hugepage_activelist);
205881a6fcaeSJoonsoo Kim 		/* Fall through */
2059a1e78772SMel Gorman 	}
206081a6fcaeSJoonsoo Kim 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
206181a6fcaeSJoonsoo Kim 	spin_unlock(&hugetlb_lock);
2062a1e78772SMel Gorman 
206390481622SDavid Gibson 	set_page_private(page, (unsigned long)spool);
2064a1e78772SMel Gorman 
2065d85f69b0SMike Kravetz 	map_commit = vma_commit_reservation(h, vma, addr);
2066d85f69b0SMike Kravetz 	if (unlikely(map_chg > map_commit)) {
206733039678SMike Kravetz 		/*
206833039678SMike Kravetz 		 * The page was added to the reservation map between
206933039678SMike Kravetz 		 * vma_needs_reservation and vma_commit_reservation.
207033039678SMike Kravetz 		 * This indicates a race with hugetlb_reserve_pages.
207133039678SMike Kravetz 		 * Adjust for the subpool count incremented above AND
207233039678SMike Kravetz 		 * in hugetlb_reserve_pages for the same page.  Also,
207333039678SMike Kravetz 		 * the reservation count added in hugetlb_reserve_pages
207433039678SMike Kravetz 		 * no longer applies.
207533039678SMike Kravetz 		 */
207633039678SMike Kravetz 		long rsv_adjust;
207733039678SMike Kravetz 
207833039678SMike Kravetz 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
207933039678SMike Kravetz 		hugetlb_acct_memory(h, -rsv_adjust);
208033039678SMike Kravetz 	}
20817893d1d5SAdam Litke 	return page;
20828f34af6fSJianyu Zhan 
20838f34af6fSJianyu Zhan out_uncharge_cgroup:
20848f34af6fSJianyu Zhan 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
20858f34af6fSJianyu Zhan out_subpool_put:
2086d85f69b0SMike Kravetz 	if (map_chg || avoid_reserve)
20878f34af6fSJianyu Zhan 		hugepage_subpool_put_pages(spool, 1);
2088feba16e2SMike Kravetz 	vma_end_reservation(h, vma, addr);
20898f34af6fSJianyu Zhan 	return ERR_PTR(-ENOSPC);
2090b45b5bd6SDavid Gibson }
2091b45b5bd6SDavid Gibson 
209274060e4dSNaoya Horiguchi /*
209374060e4dSNaoya Horiguchi  * alloc_huge_page()'s wrapper which simply returns the page if allocation
209474060e4dSNaoya Horiguchi  * succeeds, otherwise NULL. This function is called from new_vma_page(),
209574060e4dSNaoya Horiguchi  * where no ERR_VALUE is expected to be returned.
209674060e4dSNaoya Horiguchi  */
209774060e4dSNaoya Horiguchi struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
209874060e4dSNaoya Horiguchi 				unsigned long addr, int avoid_reserve)
209974060e4dSNaoya Horiguchi {
210074060e4dSNaoya Horiguchi 	struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
210174060e4dSNaoya Horiguchi 	if (IS_ERR(page))
210274060e4dSNaoya Horiguchi 		page = NULL;
210374060e4dSNaoya Horiguchi 	return page;
210474060e4dSNaoya Horiguchi }
210574060e4dSNaoya Horiguchi 
210691f47662SCyrill Gorcunov int __weak alloc_bootmem_huge_page(struct hstate *h)
2107aa888a74SAndi Kleen {
2108aa888a74SAndi Kleen 	struct huge_bootmem_page *m;
2109b2261026SJoonsoo Kim 	int nr_nodes, node;
2110aa888a74SAndi Kleen 
2111b2261026SJoonsoo Kim 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2112aa888a74SAndi Kleen 		void *addr;
2113aa888a74SAndi Kleen 
21148b89a116SGrygorii Strashko 		addr = memblock_virt_alloc_try_nid_nopanic(
21158b89a116SGrygorii Strashko 				huge_page_size(h), huge_page_size(h),
21168b89a116SGrygorii Strashko 				0, BOOTMEM_ALLOC_ACCESSIBLE, node);
2117aa888a74SAndi Kleen 		if (addr) {
2118aa888a74SAndi Kleen 			/*
2119aa888a74SAndi Kleen 			 * Use the beginning of the huge page to store the
2120aa888a74SAndi Kleen 			 * huge_bootmem_page struct (until gather_bootmem
2121aa888a74SAndi Kleen 			 * puts them into the mem_map).
2122aa888a74SAndi Kleen 			 */
2123aa888a74SAndi Kleen 			m = addr;
2124aa888a74SAndi Kleen 			goto found;
2125aa888a74SAndi Kleen 		}
2126aa888a74SAndi Kleen 	}
2127aa888a74SAndi Kleen 	return 0;
2128aa888a74SAndi Kleen 
2129aa888a74SAndi Kleen found:
2130df994eadSLuiz Capitulino 	BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2131aa888a74SAndi Kleen 	/* Put them into a private list first because mem_map is not up yet */
2132aa888a74SAndi Kleen 	list_add(&m->list, &huge_boot_pages);
2133aa888a74SAndi Kleen 	m->hstate = h;
2134aa888a74SAndi Kleen 	return 1;
2135aa888a74SAndi Kleen }
2136aa888a74SAndi Kleen 
2137d00181b9SKirill A. Shutemov static void __init prep_compound_huge_page(struct page *page,
2138d00181b9SKirill A. Shutemov 		unsigned int order)
213918229df5SAndy Whitcroft {
214018229df5SAndy Whitcroft 	if (unlikely(order > (MAX_ORDER - 1)))
214118229df5SAndy Whitcroft 		prep_compound_gigantic_page(page, order);
214218229df5SAndy Whitcroft 	else
214318229df5SAndy Whitcroft 		prep_compound_page(page, order);
214418229df5SAndy Whitcroft }
214518229df5SAndy Whitcroft 
2146aa888a74SAndi Kleen /* Put bootmem huge pages into the standard lists after mem_map is up */
2147aa888a74SAndi Kleen static void __init gather_bootmem_prealloc(void)
2148aa888a74SAndi Kleen {
2149aa888a74SAndi Kleen 	struct huge_bootmem_page *m;
2150aa888a74SAndi Kleen 
2151aa888a74SAndi Kleen 	list_for_each_entry(m, &huge_boot_pages, list) {
2152aa888a74SAndi Kleen 		struct hstate *h = m->hstate;
2153ee8f248dSBecky Bruce 		struct page *page;
2154ee8f248dSBecky Bruce 
2155ee8f248dSBecky Bruce #ifdef CONFIG_HIGHMEM
2156ee8f248dSBecky Bruce 		page = pfn_to_page(m->phys >> PAGE_SHIFT);
21578b89a116SGrygorii Strashko 		memblock_free_late(__pa(m),
2158ee8f248dSBecky Bruce 				   sizeof(struct huge_bootmem_page));
2159ee8f248dSBecky Bruce #else
2160ee8f248dSBecky Bruce 		page = virt_to_page(m);
2161ee8f248dSBecky Bruce #endif
2162aa888a74SAndi Kleen 		WARN_ON(page_count(page) != 1);
216318229df5SAndy Whitcroft 		prep_compound_huge_page(page, h->order);
2164ef5a22beSAndrea Arcangeli 		WARN_ON(PageReserved(page));
2165aa888a74SAndi Kleen 		prep_new_huge_page(h, page, page_to_nid(page));
2166b0320c7bSRafael Aquini 		/*
2167b0320c7bSRafael Aquini 		 * If we had gigantic hugepages allocated at boot time, we need
2168b0320c7bSRafael Aquini 		 * to restore the 'stolen' pages to totalram_pages in order to
2169b0320c7bSRafael Aquini 		 * fix confusing memory reports from free(1) and another
2170b0320c7bSRafael Aquini 		 * side-effects, like CommitLimit going negative.
2171b0320c7bSRafael Aquini 		 */
2172bae7f4aeSLuiz Capitulino 		if (hstate_is_gigantic(h))
21733dcc0571SJiang Liu 			adjust_managed_page_count(page, 1 << h->order);
2174aa888a74SAndi Kleen 	}
2175aa888a74SAndi Kleen }
2176aa888a74SAndi Kleen 
21778faa8b07SAndi Kleen static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
21781da177e4SLinus Torvalds {
21791da177e4SLinus Torvalds 	unsigned long i;
21801da177e4SLinus Torvalds 
2181e5ff2159SAndi Kleen 	for (i = 0; i < h->max_huge_pages; ++i) {
2182bae7f4aeSLuiz Capitulino 		if (hstate_is_gigantic(h)) {
2183aa888a74SAndi Kleen 			if (!alloc_bootmem_huge_page(h))
2184aa888a74SAndi Kleen 				break;
21859b5e5d0fSLee Schermerhorn 		} else if (!alloc_fresh_huge_page(h,
21868cebfcd0SLai Jiangshan 					 &node_states[N_MEMORY]))
21871da177e4SLinus Torvalds 			break;
21881da177e4SLinus Torvalds 	}
21898faa8b07SAndi Kleen 	h->max_huge_pages = i;
2190e5ff2159SAndi Kleen }
2191e5ff2159SAndi Kleen 
2192e5ff2159SAndi Kleen static void __init hugetlb_init_hstates(void)
2193e5ff2159SAndi Kleen {
2194e5ff2159SAndi Kleen 	struct hstate *h;
2195e5ff2159SAndi Kleen 
2196e5ff2159SAndi Kleen 	for_each_hstate(h) {
2197641844f5SNaoya Horiguchi 		if (minimum_order > huge_page_order(h))
2198641844f5SNaoya Horiguchi 			minimum_order = huge_page_order(h);
2199641844f5SNaoya Horiguchi 
22008faa8b07SAndi Kleen 		/* oversize hugepages were init'ed in early boot */
2201bae7f4aeSLuiz Capitulino 		if (!hstate_is_gigantic(h))
22028faa8b07SAndi Kleen 			hugetlb_hstate_alloc_pages(h);
2203e5ff2159SAndi Kleen 	}
2204641844f5SNaoya Horiguchi 	VM_BUG_ON(minimum_order == UINT_MAX);
2205e5ff2159SAndi Kleen }
2206e5ff2159SAndi Kleen 
22074abd32dbSAndi Kleen static char * __init memfmt(char *buf, unsigned long n)
22084abd32dbSAndi Kleen {
22094abd32dbSAndi Kleen 	if (n >= (1UL << 30))
22104abd32dbSAndi Kleen 		sprintf(buf, "%lu GB", n >> 30);
22114abd32dbSAndi Kleen 	else if (n >= (1UL << 20))
22124abd32dbSAndi Kleen 		sprintf(buf, "%lu MB", n >> 20);
22134abd32dbSAndi Kleen 	else
22144abd32dbSAndi Kleen 		sprintf(buf, "%lu KB", n >> 10);
22154abd32dbSAndi Kleen 	return buf;
22164abd32dbSAndi Kleen }
22174abd32dbSAndi Kleen 
2218e5ff2159SAndi Kleen static void __init report_hugepages(void)
2219e5ff2159SAndi Kleen {
2220e5ff2159SAndi Kleen 	struct hstate *h;
2221e5ff2159SAndi Kleen 
2222e5ff2159SAndi Kleen 	for_each_hstate(h) {
22234abd32dbSAndi Kleen 		char buf[32];
2224ffb22af5SAndrew Morton 		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
22254abd32dbSAndi Kleen 			memfmt(buf, huge_page_size(h)),
22264abd32dbSAndi Kleen 			h->free_huge_pages);
2227e5ff2159SAndi Kleen 	}
2228e5ff2159SAndi Kleen }
2229e5ff2159SAndi Kleen 
22301da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM
22316ae11b27SLee Schermerhorn static void try_to_free_low(struct hstate *h, unsigned long count,
22326ae11b27SLee Schermerhorn 						nodemask_t *nodes_allowed)
22331da177e4SLinus Torvalds {
22344415cc8dSChristoph Lameter 	int i;
22354415cc8dSChristoph Lameter 
2236bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
2237aa888a74SAndi Kleen 		return;
2238aa888a74SAndi Kleen 
22396ae11b27SLee Schermerhorn 	for_each_node_mask(i, *nodes_allowed) {
22401da177e4SLinus Torvalds 		struct page *page, *next;
2241a5516438SAndi Kleen 		struct list_head *freel = &h->hugepage_freelists[i];
2242a5516438SAndi Kleen 		list_for_each_entry_safe(page, next, freel, lru) {
2243a5516438SAndi Kleen 			if (count >= h->nr_huge_pages)
22446b0c880dSAdam Litke 				return;
22451da177e4SLinus Torvalds 			if (PageHighMem(page))
22461da177e4SLinus Torvalds 				continue;
22471da177e4SLinus Torvalds 			list_del(&page->lru);
2248e5ff2159SAndi Kleen 			update_and_free_page(h, page);
2249a5516438SAndi Kleen 			h->free_huge_pages--;
2250a5516438SAndi Kleen 			h->free_huge_pages_node[page_to_nid(page)]--;
22511da177e4SLinus Torvalds 		}
22521da177e4SLinus Torvalds 	}
22531da177e4SLinus Torvalds }
22541da177e4SLinus Torvalds #else
22556ae11b27SLee Schermerhorn static inline void try_to_free_low(struct hstate *h, unsigned long count,
22566ae11b27SLee Schermerhorn 						nodemask_t *nodes_allowed)
22571da177e4SLinus Torvalds {
22581da177e4SLinus Torvalds }
22591da177e4SLinus Torvalds #endif
22601da177e4SLinus Torvalds 
226120a0307cSWu Fengguang /*
226220a0307cSWu Fengguang  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
226320a0307cSWu Fengguang  * balanced by operating on them in a round-robin fashion.
226420a0307cSWu Fengguang  * Returns 1 if an adjustment was made.
226520a0307cSWu Fengguang  */
22666ae11b27SLee Schermerhorn static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
22676ae11b27SLee Schermerhorn 				int delta)
226820a0307cSWu Fengguang {
2269b2261026SJoonsoo Kim 	int nr_nodes, node;
227020a0307cSWu Fengguang 
227120a0307cSWu Fengguang 	VM_BUG_ON(delta != -1 && delta != 1);
227220a0307cSWu Fengguang 
2273e8c5c824SLee Schermerhorn 	if (delta < 0) {
2274b2261026SJoonsoo Kim 		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2275b2261026SJoonsoo Kim 			if (h->surplus_huge_pages_node[node])
2276b2261026SJoonsoo Kim 				goto found;
2277b2261026SJoonsoo Kim 		}
2278b2261026SJoonsoo Kim 	} else {
2279b2261026SJoonsoo Kim 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2280b2261026SJoonsoo Kim 			if (h->surplus_huge_pages_node[node] <
2281b2261026SJoonsoo Kim 					h->nr_huge_pages_node[node])
2282b2261026SJoonsoo Kim 				goto found;
2283e8c5c824SLee Schermerhorn 		}
22849a76db09SLee Schermerhorn 	}
2285b2261026SJoonsoo Kim 	return 0;
228620a0307cSWu Fengguang 
2287b2261026SJoonsoo Kim found:
228820a0307cSWu Fengguang 	h->surplus_huge_pages += delta;
2289b2261026SJoonsoo Kim 	h->surplus_huge_pages_node[node] += delta;
2290b2261026SJoonsoo Kim 	return 1;
229120a0307cSWu Fengguang }
229220a0307cSWu Fengguang 
2293a5516438SAndi Kleen #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
22946ae11b27SLee Schermerhorn static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
22956ae11b27SLee Schermerhorn 						nodemask_t *nodes_allowed)
22961da177e4SLinus Torvalds {
22977893d1d5SAdam Litke 	unsigned long min_count, ret;
22981da177e4SLinus Torvalds 
2299944d9fecSLuiz Capitulino 	if (hstate_is_gigantic(h) && !gigantic_page_supported())
2300aa888a74SAndi Kleen 		return h->max_huge_pages;
2301aa888a74SAndi Kleen 
23027893d1d5SAdam Litke 	/*
23037893d1d5SAdam Litke 	 * Increase the pool size
23047893d1d5SAdam Litke 	 * First take pages out of surplus state.  Then make up the
23057893d1d5SAdam Litke 	 * remaining difference by allocating fresh huge pages.
2306d1c3fb1fSNishanth Aravamudan 	 *
2307d15c7c09SNaoya Horiguchi 	 * We might race with __alloc_buddy_huge_page() here and be unable
2308d1c3fb1fSNishanth Aravamudan 	 * to convert a surplus huge page to a normal huge page. That is
2309d1c3fb1fSNishanth Aravamudan 	 * not critical, though, it just means the overall size of the
2310d1c3fb1fSNishanth Aravamudan 	 * pool might be one hugepage larger than it needs to be, but
2311d1c3fb1fSNishanth Aravamudan 	 * within all the constraints specified by the sysctls.
23127893d1d5SAdam Litke 	 */
23131da177e4SLinus Torvalds 	spin_lock(&hugetlb_lock);
2314a5516438SAndi Kleen 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
23156ae11b27SLee Schermerhorn 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
23167893d1d5SAdam Litke 			break;
23177893d1d5SAdam Litke 	}
23187893d1d5SAdam Litke 
2319a5516438SAndi Kleen 	while (count > persistent_huge_pages(h)) {
23207893d1d5SAdam Litke 		/*
23217893d1d5SAdam Litke 		 * If this allocation races such that we no longer need the
23227893d1d5SAdam Litke 		 * page, free_huge_page will handle it by freeing the page
23237893d1d5SAdam Litke 		 * and reducing the surplus.
23247893d1d5SAdam Litke 		 */
23257893d1d5SAdam Litke 		spin_unlock(&hugetlb_lock);
2326649920c6SJia He 
2327649920c6SJia He 		/* yield cpu to avoid soft lockup */
2328649920c6SJia He 		cond_resched();
2329649920c6SJia He 
2330944d9fecSLuiz Capitulino 		if (hstate_is_gigantic(h))
2331944d9fecSLuiz Capitulino 			ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2332944d9fecSLuiz Capitulino 		else
23336ae11b27SLee Schermerhorn 			ret = alloc_fresh_huge_page(h, nodes_allowed);
23347893d1d5SAdam Litke 		spin_lock(&hugetlb_lock);
23357893d1d5SAdam Litke 		if (!ret)
23367893d1d5SAdam Litke 			goto out;
23377893d1d5SAdam Litke 
2338536240f2SMel Gorman 		/* Bail for signals. Probably ctrl-c from user */
2339536240f2SMel Gorman 		if (signal_pending(current))
2340536240f2SMel Gorman 			goto out;
23417893d1d5SAdam Litke 	}
23427893d1d5SAdam Litke 
23437893d1d5SAdam Litke 	/*
23447893d1d5SAdam Litke 	 * Decrease the pool size
23457893d1d5SAdam Litke 	 * First return free pages to the buddy allocator (being careful
23467893d1d5SAdam Litke 	 * to keep enough around to satisfy reservations).  Then place
23477893d1d5SAdam Litke 	 * pages into surplus state as needed so the pool will shrink
23487893d1d5SAdam Litke 	 * to the desired size as pages become free.
2349d1c3fb1fSNishanth Aravamudan 	 *
2350d1c3fb1fSNishanth Aravamudan 	 * By placing pages into the surplus state independent of the
2351d1c3fb1fSNishanth Aravamudan 	 * overcommit value, we are allowing the surplus pool size to
2352d1c3fb1fSNishanth Aravamudan 	 * exceed overcommit. There are few sane options here. Since
2353d15c7c09SNaoya Horiguchi 	 * __alloc_buddy_huge_page() is checking the global counter,
2354d1c3fb1fSNishanth Aravamudan 	 * though, we'll note that we're not allowed to exceed surplus
2355d1c3fb1fSNishanth Aravamudan 	 * and won't grow the pool anywhere else. Not until one of the
2356d1c3fb1fSNishanth Aravamudan 	 * sysctls are changed, or the surplus pages go out of use.
23577893d1d5SAdam Litke 	 */
2358a5516438SAndi Kleen 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
23596b0c880dSAdam Litke 	min_count = max(count, min_count);
23606ae11b27SLee Schermerhorn 	try_to_free_low(h, min_count, nodes_allowed);
2361a5516438SAndi Kleen 	while (min_count < persistent_huge_pages(h)) {
23626ae11b27SLee Schermerhorn 		if (!free_pool_huge_page(h, nodes_allowed, 0))
23631da177e4SLinus Torvalds 			break;
236455f67141SMizuma, Masayoshi 		cond_resched_lock(&hugetlb_lock);
23651da177e4SLinus Torvalds 	}
2366a5516438SAndi Kleen 	while (count < persistent_huge_pages(h)) {
23676ae11b27SLee Schermerhorn 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
23687893d1d5SAdam Litke 			break;
23697893d1d5SAdam Litke 	}
23707893d1d5SAdam Litke out:
2371a5516438SAndi Kleen 	ret = persistent_huge_pages(h);
23721da177e4SLinus Torvalds 	spin_unlock(&hugetlb_lock);
23737893d1d5SAdam Litke 	return ret;
23741da177e4SLinus Torvalds }
23751da177e4SLinus Torvalds 
2376a3437870SNishanth Aravamudan #define HSTATE_ATTR_RO(_name) \
2377a3437870SNishanth Aravamudan 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2378a3437870SNishanth Aravamudan 
2379a3437870SNishanth Aravamudan #define HSTATE_ATTR(_name) \
2380a3437870SNishanth Aravamudan 	static struct kobj_attribute _name##_attr = \
2381a3437870SNishanth Aravamudan 		__ATTR(_name, 0644, _name##_show, _name##_store)
2382a3437870SNishanth Aravamudan 
2383a3437870SNishanth Aravamudan static struct kobject *hugepages_kobj;
2384a3437870SNishanth Aravamudan static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2385a3437870SNishanth Aravamudan 
23869a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
23879a305230SLee Schermerhorn 
23889a305230SLee Schermerhorn static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2389a3437870SNishanth Aravamudan {
2390a3437870SNishanth Aravamudan 	int i;
23919a305230SLee Schermerhorn 
2392a3437870SNishanth Aravamudan 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
23939a305230SLee Schermerhorn 		if (hstate_kobjs[i] == kobj) {
23949a305230SLee Schermerhorn 			if (nidp)
23959a305230SLee Schermerhorn 				*nidp = NUMA_NO_NODE;
2396a3437870SNishanth Aravamudan 			return &hstates[i];
23979a305230SLee Schermerhorn 		}
23989a305230SLee Schermerhorn 
23999a305230SLee Schermerhorn 	return kobj_to_node_hstate(kobj, nidp);
2400a3437870SNishanth Aravamudan }
2401a3437870SNishanth Aravamudan 
240206808b08SLee Schermerhorn static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2403a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
2404a3437870SNishanth Aravamudan {
24059a305230SLee Schermerhorn 	struct hstate *h;
24069a305230SLee Schermerhorn 	unsigned long nr_huge_pages;
24079a305230SLee Schermerhorn 	int nid;
24089a305230SLee Schermerhorn 
24099a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
24109a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
24119a305230SLee Schermerhorn 		nr_huge_pages = h->nr_huge_pages;
24129a305230SLee Schermerhorn 	else
24139a305230SLee Schermerhorn 		nr_huge_pages = h->nr_huge_pages_node[nid];
24149a305230SLee Schermerhorn 
24159a305230SLee Schermerhorn 	return sprintf(buf, "%lu\n", nr_huge_pages);
2416a3437870SNishanth Aravamudan }
2417adbe8726SEric B Munson 
2418238d3c13SDavid Rientjes static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2419238d3c13SDavid Rientjes 					   struct hstate *h, int nid,
2420238d3c13SDavid Rientjes 					   unsigned long count, size_t len)
2421a3437870SNishanth Aravamudan {
2422a3437870SNishanth Aravamudan 	int err;
2423bad44b5bSDavid Rientjes 	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2424a3437870SNishanth Aravamudan 
2425944d9fecSLuiz Capitulino 	if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2426adbe8726SEric B Munson 		err = -EINVAL;
2427adbe8726SEric B Munson 		goto out;
2428adbe8726SEric B Munson 	}
2429adbe8726SEric B Munson 
24309a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE) {
24319a305230SLee Schermerhorn 		/*
24329a305230SLee Schermerhorn 		 * global hstate attribute
24339a305230SLee Schermerhorn 		 */
24349a305230SLee Schermerhorn 		if (!(obey_mempolicy &&
24359a305230SLee Schermerhorn 				init_nodemask_of_mempolicy(nodes_allowed))) {
243606808b08SLee Schermerhorn 			NODEMASK_FREE(nodes_allowed);
24378cebfcd0SLai Jiangshan 			nodes_allowed = &node_states[N_MEMORY];
243806808b08SLee Schermerhorn 		}
24399a305230SLee Schermerhorn 	} else if (nodes_allowed) {
24409a305230SLee Schermerhorn 		/*
24419a305230SLee Schermerhorn 		 * per node hstate attribute: adjust count to global,
24429a305230SLee Schermerhorn 		 * but restrict alloc/free to the specified node.
24439a305230SLee Schermerhorn 		 */
24449a305230SLee Schermerhorn 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
24459a305230SLee Schermerhorn 		init_nodemask_of_node(nodes_allowed, nid);
24469a305230SLee Schermerhorn 	} else
24478cebfcd0SLai Jiangshan 		nodes_allowed = &node_states[N_MEMORY];
24489a305230SLee Schermerhorn 
244906808b08SLee Schermerhorn 	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2450a3437870SNishanth Aravamudan 
24518cebfcd0SLai Jiangshan 	if (nodes_allowed != &node_states[N_MEMORY])
245206808b08SLee Schermerhorn 		NODEMASK_FREE(nodes_allowed);
245306808b08SLee Schermerhorn 
245406808b08SLee Schermerhorn 	return len;
2455adbe8726SEric B Munson out:
2456adbe8726SEric B Munson 	NODEMASK_FREE(nodes_allowed);
2457adbe8726SEric B Munson 	return err;
245806808b08SLee Schermerhorn }
245906808b08SLee Schermerhorn 
2460238d3c13SDavid Rientjes static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2461238d3c13SDavid Rientjes 					 struct kobject *kobj, const char *buf,
2462238d3c13SDavid Rientjes 					 size_t len)
2463238d3c13SDavid Rientjes {
2464238d3c13SDavid Rientjes 	struct hstate *h;
2465238d3c13SDavid Rientjes 	unsigned long count;
2466238d3c13SDavid Rientjes 	int nid;
2467238d3c13SDavid Rientjes 	int err;
2468238d3c13SDavid Rientjes 
2469238d3c13SDavid Rientjes 	err = kstrtoul(buf, 10, &count);
2470238d3c13SDavid Rientjes 	if (err)
2471238d3c13SDavid Rientjes 		return err;
2472238d3c13SDavid Rientjes 
2473238d3c13SDavid Rientjes 	h = kobj_to_hstate(kobj, &nid);
2474238d3c13SDavid Rientjes 	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2475238d3c13SDavid Rientjes }
2476238d3c13SDavid Rientjes 
247706808b08SLee Schermerhorn static ssize_t nr_hugepages_show(struct kobject *kobj,
247806808b08SLee Schermerhorn 				       struct kobj_attribute *attr, char *buf)
247906808b08SLee Schermerhorn {
248006808b08SLee Schermerhorn 	return nr_hugepages_show_common(kobj, attr, buf);
248106808b08SLee Schermerhorn }
248206808b08SLee Schermerhorn 
248306808b08SLee Schermerhorn static ssize_t nr_hugepages_store(struct kobject *kobj,
248406808b08SLee Schermerhorn 	       struct kobj_attribute *attr, const char *buf, size_t len)
248506808b08SLee Schermerhorn {
2486238d3c13SDavid Rientjes 	return nr_hugepages_store_common(false, kobj, buf, len);
2487a3437870SNishanth Aravamudan }
2488a3437870SNishanth Aravamudan HSTATE_ATTR(nr_hugepages);
2489a3437870SNishanth Aravamudan 
249006808b08SLee Schermerhorn #ifdef CONFIG_NUMA
249106808b08SLee Schermerhorn 
249206808b08SLee Schermerhorn /*
249306808b08SLee Schermerhorn  * hstate attribute for optionally mempolicy-based constraint on persistent
249406808b08SLee Schermerhorn  * huge page alloc/free.
249506808b08SLee Schermerhorn  */
249606808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
249706808b08SLee Schermerhorn 				       struct kobj_attribute *attr, char *buf)
249806808b08SLee Schermerhorn {
249906808b08SLee Schermerhorn 	return nr_hugepages_show_common(kobj, attr, buf);
250006808b08SLee Schermerhorn }
250106808b08SLee Schermerhorn 
250206808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
250306808b08SLee Schermerhorn 	       struct kobj_attribute *attr, const char *buf, size_t len)
250406808b08SLee Schermerhorn {
2505238d3c13SDavid Rientjes 	return nr_hugepages_store_common(true, kobj, buf, len);
250606808b08SLee Schermerhorn }
250706808b08SLee Schermerhorn HSTATE_ATTR(nr_hugepages_mempolicy);
250806808b08SLee Schermerhorn #endif
250906808b08SLee Schermerhorn 
251006808b08SLee Schermerhorn 
2511a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2512a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
2513a3437870SNishanth Aravamudan {
25149a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2515a3437870SNishanth Aravamudan 	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2516a3437870SNishanth Aravamudan }
2517adbe8726SEric B Munson 
2518a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2519a3437870SNishanth Aravamudan 		struct kobj_attribute *attr, const char *buf, size_t count)
2520a3437870SNishanth Aravamudan {
2521a3437870SNishanth Aravamudan 	int err;
2522a3437870SNishanth Aravamudan 	unsigned long input;
25239a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2524a3437870SNishanth Aravamudan 
2525bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
2526adbe8726SEric B Munson 		return -EINVAL;
2527adbe8726SEric B Munson 
25283dbb95f7SJingoo Han 	err = kstrtoul(buf, 10, &input);
2529a3437870SNishanth Aravamudan 	if (err)
253073ae31e5SEric B Munson 		return err;
2531a3437870SNishanth Aravamudan 
2532a3437870SNishanth Aravamudan 	spin_lock(&hugetlb_lock);
2533a3437870SNishanth Aravamudan 	h->nr_overcommit_huge_pages = input;
2534a3437870SNishanth Aravamudan 	spin_unlock(&hugetlb_lock);
2535a3437870SNishanth Aravamudan 
2536a3437870SNishanth Aravamudan 	return count;
2537a3437870SNishanth Aravamudan }
2538a3437870SNishanth Aravamudan HSTATE_ATTR(nr_overcommit_hugepages);
2539a3437870SNishanth Aravamudan 
2540a3437870SNishanth Aravamudan static ssize_t free_hugepages_show(struct kobject *kobj,
2541a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
2542a3437870SNishanth Aravamudan {
25439a305230SLee Schermerhorn 	struct hstate *h;
25449a305230SLee Schermerhorn 	unsigned long free_huge_pages;
25459a305230SLee Schermerhorn 	int nid;
25469a305230SLee Schermerhorn 
25479a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
25489a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
25499a305230SLee Schermerhorn 		free_huge_pages = h->free_huge_pages;
25509a305230SLee Schermerhorn 	else
25519a305230SLee Schermerhorn 		free_huge_pages = h->free_huge_pages_node[nid];
25529a305230SLee Schermerhorn 
25539a305230SLee Schermerhorn 	return sprintf(buf, "%lu\n", free_huge_pages);
2554a3437870SNishanth Aravamudan }
2555a3437870SNishanth Aravamudan HSTATE_ATTR_RO(free_hugepages);
2556a3437870SNishanth Aravamudan 
2557a3437870SNishanth Aravamudan static ssize_t resv_hugepages_show(struct kobject *kobj,
2558a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
2559a3437870SNishanth Aravamudan {
25609a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
2561a3437870SNishanth Aravamudan 	return sprintf(buf, "%lu\n", h->resv_huge_pages);
2562a3437870SNishanth Aravamudan }
2563a3437870SNishanth Aravamudan HSTATE_ATTR_RO(resv_hugepages);
2564a3437870SNishanth Aravamudan 
2565a3437870SNishanth Aravamudan static ssize_t surplus_hugepages_show(struct kobject *kobj,
2566a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
2567a3437870SNishanth Aravamudan {
25689a305230SLee Schermerhorn 	struct hstate *h;
25699a305230SLee Schermerhorn 	unsigned long surplus_huge_pages;
25709a305230SLee Schermerhorn 	int nid;
25719a305230SLee Schermerhorn 
25729a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
25739a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
25749a305230SLee Schermerhorn 		surplus_huge_pages = h->surplus_huge_pages;
25759a305230SLee Schermerhorn 	else
25769a305230SLee Schermerhorn 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
25779a305230SLee Schermerhorn 
25789a305230SLee Schermerhorn 	return sprintf(buf, "%lu\n", surplus_huge_pages);
2579a3437870SNishanth Aravamudan }
2580a3437870SNishanth Aravamudan HSTATE_ATTR_RO(surplus_hugepages);
2581a3437870SNishanth Aravamudan 
2582a3437870SNishanth Aravamudan static struct attribute *hstate_attrs[] = {
2583a3437870SNishanth Aravamudan 	&nr_hugepages_attr.attr,
2584a3437870SNishanth Aravamudan 	&nr_overcommit_hugepages_attr.attr,
2585a3437870SNishanth Aravamudan 	&free_hugepages_attr.attr,
2586a3437870SNishanth Aravamudan 	&resv_hugepages_attr.attr,
2587a3437870SNishanth Aravamudan 	&surplus_hugepages_attr.attr,
258806808b08SLee Schermerhorn #ifdef CONFIG_NUMA
258906808b08SLee Schermerhorn 	&nr_hugepages_mempolicy_attr.attr,
259006808b08SLee Schermerhorn #endif
2591a3437870SNishanth Aravamudan 	NULL,
2592a3437870SNishanth Aravamudan };
2593a3437870SNishanth Aravamudan 
2594a3437870SNishanth Aravamudan static struct attribute_group hstate_attr_group = {
2595a3437870SNishanth Aravamudan 	.attrs = hstate_attrs,
2596a3437870SNishanth Aravamudan };
2597a3437870SNishanth Aravamudan 
2598094e9539SJeff Mahoney static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
25999a305230SLee Schermerhorn 				    struct kobject **hstate_kobjs,
26009a305230SLee Schermerhorn 				    struct attribute_group *hstate_attr_group)
2601a3437870SNishanth Aravamudan {
2602a3437870SNishanth Aravamudan 	int retval;
2603972dc4deSAneesh Kumar K.V 	int hi = hstate_index(h);
2604a3437870SNishanth Aravamudan 
26059a305230SLee Schermerhorn 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
26069a305230SLee Schermerhorn 	if (!hstate_kobjs[hi])
2607a3437870SNishanth Aravamudan 		return -ENOMEM;
2608a3437870SNishanth Aravamudan 
26099a305230SLee Schermerhorn 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2610a3437870SNishanth Aravamudan 	if (retval)
26119a305230SLee Schermerhorn 		kobject_put(hstate_kobjs[hi]);
2612a3437870SNishanth Aravamudan 
2613a3437870SNishanth Aravamudan 	return retval;
2614a3437870SNishanth Aravamudan }
2615a3437870SNishanth Aravamudan 
2616a3437870SNishanth Aravamudan static void __init hugetlb_sysfs_init(void)
2617a3437870SNishanth Aravamudan {
2618a3437870SNishanth Aravamudan 	struct hstate *h;
2619a3437870SNishanth Aravamudan 	int err;
2620a3437870SNishanth Aravamudan 
2621a3437870SNishanth Aravamudan 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2622a3437870SNishanth Aravamudan 	if (!hugepages_kobj)
2623a3437870SNishanth Aravamudan 		return;
2624a3437870SNishanth Aravamudan 
2625a3437870SNishanth Aravamudan 	for_each_hstate(h) {
26269a305230SLee Schermerhorn 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
26279a305230SLee Schermerhorn 					 hstate_kobjs, &hstate_attr_group);
2628a3437870SNishanth Aravamudan 		if (err)
2629ffb22af5SAndrew Morton 			pr_err("Hugetlb: Unable to add hstate %s", h->name);
2630a3437870SNishanth Aravamudan 	}
2631a3437870SNishanth Aravamudan }
2632a3437870SNishanth Aravamudan 
26339a305230SLee Schermerhorn #ifdef CONFIG_NUMA
26349a305230SLee Schermerhorn 
26359a305230SLee Schermerhorn /*
26369a305230SLee Schermerhorn  * node_hstate/s - associate per node hstate attributes, via their kobjects,
263710fbcf4cSKay Sievers  * with node devices in node_devices[] using a parallel array.  The array
263810fbcf4cSKay Sievers  * index of a node device or _hstate == node id.
263910fbcf4cSKay Sievers  * This is here to avoid any static dependency of the node device driver, in
26409a305230SLee Schermerhorn  * the base kernel, on the hugetlb module.
26419a305230SLee Schermerhorn  */
26429a305230SLee Schermerhorn struct node_hstate {
26439a305230SLee Schermerhorn 	struct kobject		*hugepages_kobj;
26449a305230SLee Schermerhorn 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
26459a305230SLee Schermerhorn };
2646b4e289a6SAlexander Kuleshov static struct node_hstate node_hstates[MAX_NUMNODES];
26479a305230SLee Schermerhorn 
26489a305230SLee Schermerhorn /*
264910fbcf4cSKay Sievers  * A subset of global hstate attributes for node devices
26509a305230SLee Schermerhorn  */
26519a305230SLee Schermerhorn static struct attribute *per_node_hstate_attrs[] = {
26529a305230SLee Schermerhorn 	&nr_hugepages_attr.attr,
26539a305230SLee Schermerhorn 	&free_hugepages_attr.attr,
26549a305230SLee Schermerhorn 	&surplus_hugepages_attr.attr,
26559a305230SLee Schermerhorn 	NULL,
26569a305230SLee Schermerhorn };
26579a305230SLee Schermerhorn 
26589a305230SLee Schermerhorn static struct attribute_group per_node_hstate_attr_group = {
26599a305230SLee Schermerhorn 	.attrs = per_node_hstate_attrs,
26609a305230SLee Schermerhorn };
26619a305230SLee Schermerhorn 
26629a305230SLee Schermerhorn /*
266310fbcf4cSKay Sievers  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
26649a305230SLee Schermerhorn  * Returns node id via non-NULL nidp.
26659a305230SLee Schermerhorn  */
26669a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
26679a305230SLee Schermerhorn {
26689a305230SLee Schermerhorn 	int nid;
26699a305230SLee Schermerhorn 
26709a305230SLee Schermerhorn 	for (nid = 0; nid < nr_node_ids; nid++) {
26719a305230SLee Schermerhorn 		struct node_hstate *nhs = &node_hstates[nid];
26729a305230SLee Schermerhorn 		int i;
26739a305230SLee Schermerhorn 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
26749a305230SLee Schermerhorn 			if (nhs->hstate_kobjs[i] == kobj) {
26759a305230SLee Schermerhorn 				if (nidp)
26769a305230SLee Schermerhorn 					*nidp = nid;
26779a305230SLee Schermerhorn 				return &hstates[i];
26789a305230SLee Schermerhorn 			}
26799a305230SLee Schermerhorn 	}
26809a305230SLee Schermerhorn 
26819a305230SLee Schermerhorn 	BUG();
26829a305230SLee Schermerhorn 	return NULL;
26839a305230SLee Schermerhorn }
26849a305230SLee Schermerhorn 
26859a305230SLee Schermerhorn /*
268610fbcf4cSKay Sievers  * Unregister hstate attributes from a single node device.
26879a305230SLee Schermerhorn  * No-op if no hstate attributes attached.
26889a305230SLee Schermerhorn  */
26893cd8b44fSClaudiu Ghioc static void hugetlb_unregister_node(struct node *node)
26909a305230SLee Schermerhorn {
26919a305230SLee Schermerhorn 	struct hstate *h;
269210fbcf4cSKay Sievers 	struct node_hstate *nhs = &node_hstates[node->dev.id];
26939a305230SLee Schermerhorn 
26949a305230SLee Schermerhorn 	if (!nhs->hugepages_kobj)
26959b5e5d0fSLee Schermerhorn 		return;		/* no hstate attributes */
26969a305230SLee Schermerhorn 
2697972dc4deSAneesh Kumar K.V 	for_each_hstate(h) {
2698972dc4deSAneesh Kumar K.V 		int idx = hstate_index(h);
2699972dc4deSAneesh Kumar K.V 		if (nhs->hstate_kobjs[idx]) {
2700972dc4deSAneesh Kumar K.V 			kobject_put(nhs->hstate_kobjs[idx]);
2701972dc4deSAneesh Kumar K.V 			nhs->hstate_kobjs[idx] = NULL;
2702972dc4deSAneesh Kumar K.V 		}
27039a305230SLee Schermerhorn 	}
27049a305230SLee Schermerhorn 
27059a305230SLee Schermerhorn 	kobject_put(nhs->hugepages_kobj);
27069a305230SLee Schermerhorn 	nhs->hugepages_kobj = NULL;
27079a305230SLee Schermerhorn }
27089a305230SLee Schermerhorn 
27099a305230SLee Schermerhorn 
27109a305230SLee Schermerhorn /*
271110fbcf4cSKay Sievers  * Register hstate attributes for a single node device.
27129a305230SLee Schermerhorn  * No-op if attributes already registered.
27139a305230SLee Schermerhorn  */
27143cd8b44fSClaudiu Ghioc static void hugetlb_register_node(struct node *node)
27159a305230SLee Schermerhorn {
27169a305230SLee Schermerhorn 	struct hstate *h;
271710fbcf4cSKay Sievers 	struct node_hstate *nhs = &node_hstates[node->dev.id];
27189a305230SLee Schermerhorn 	int err;
27199a305230SLee Schermerhorn 
27209a305230SLee Schermerhorn 	if (nhs->hugepages_kobj)
27219a305230SLee Schermerhorn 		return;		/* already allocated */
27229a305230SLee Schermerhorn 
27239a305230SLee Schermerhorn 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
272410fbcf4cSKay Sievers 							&node->dev.kobj);
27259a305230SLee Schermerhorn 	if (!nhs->hugepages_kobj)
27269a305230SLee Schermerhorn 		return;
27279a305230SLee Schermerhorn 
27289a305230SLee Schermerhorn 	for_each_hstate(h) {
27299a305230SLee Schermerhorn 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
27309a305230SLee Schermerhorn 						nhs->hstate_kobjs,
27319a305230SLee Schermerhorn 						&per_node_hstate_attr_group);
27329a305230SLee Schermerhorn 		if (err) {
2733ffb22af5SAndrew Morton 			pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
273410fbcf4cSKay Sievers 				h->name, node->dev.id);
27359a305230SLee Schermerhorn 			hugetlb_unregister_node(node);
27369a305230SLee Schermerhorn 			break;
27379a305230SLee Schermerhorn 		}
27389a305230SLee Schermerhorn 	}
27399a305230SLee Schermerhorn }
27409a305230SLee Schermerhorn 
27419a305230SLee Schermerhorn /*
27429b5e5d0fSLee Schermerhorn  * hugetlb init time:  register hstate attributes for all registered node
274310fbcf4cSKay Sievers  * devices of nodes that have memory.  All on-line nodes should have
274410fbcf4cSKay Sievers  * registered their associated device by this time.
27459a305230SLee Schermerhorn  */
27467d9ca000SLuiz Capitulino static void __init hugetlb_register_all_nodes(void)
27479a305230SLee Schermerhorn {
27489a305230SLee Schermerhorn 	int nid;
27499a305230SLee Schermerhorn 
27508cebfcd0SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
27518732794bSWen Congyang 		struct node *node = node_devices[nid];
275210fbcf4cSKay Sievers 		if (node->dev.id == nid)
27539a305230SLee Schermerhorn 			hugetlb_register_node(node);
27549a305230SLee Schermerhorn 	}
27559a305230SLee Schermerhorn 
27569a305230SLee Schermerhorn 	/*
275710fbcf4cSKay Sievers 	 * Let the node device driver know we're here so it can
27589a305230SLee Schermerhorn 	 * [un]register hstate attributes on node hotplug.
27599a305230SLee Schermerhorn 	 */
27609a305230SLee Schermerhorn 	register_hugetlbfs_with_node(hugetlb_register_node,
27619a305230SLee Schermerhorn 				     hugetlb_unregister_node);
27629a305230SLee Schermerhorn }
27639a305230SLee Schermerhorn #else	/* !CONFIG_NUMA */
27649a305230SLee Schermerhorn 
27659a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
27669a305230SLee Schermerhorn {
27679a305230SLee Schermerhorn 	BUG();
27689a305230SLee Schermerhorn 	if (nidp)
27699a305230SLee Schermerhorn 		*nidp = -1;
27709a305230SLee Schermerhorn 	return NULL;
27719a305230SLee Schermerhorn }
27729a305230SLee Schermerhorn 
27739a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) { }
27749a305230SLee Schermerhorn 
27759a305230SLee Schermerhorn #endif
27769a305230SLee Schermerhorn 
2777a3437870SNishanth Aravamudan static int __init hugetlb_init(void)
2778a3437870SNishanth Aravamudan {
27798382d914SDavidlohr Bueso 	int i;
27808382d914SDavidlohr Bueso 
2781457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
27820ef89d25SBenjamin Herrenschmidt 		return 0;
2783a3437870SNishanth Aravamudan 
2784e11bfbfcSNick Piggin 	if (!size_to_hstate(default_hstate_size)) {
2785e11bfbfcSNick Piggin 		default_hstate_size = HPAGE_SIZE;
2786e11bfbfcSNick Piggin 		if (!size_to_hstate(default_hstate_size))
2787a3437870SNishanth Aravamudan 			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2788a3437870SNishanth Aravamudan 	}
2789972dc4deSAneesh Kumar K.V 	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2790f8b74815SVaishali Thakkar 	if (default_hstate_max_huge_pages) {
2791f8b74815SVaishali Thakkar 		if (!default_hstate.max_huge_pages)
2792e11bfbfcSNick Piggin 			default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2793f8b74815SVaishali Thakkar 	}
2794a3437870SNishanth Aravamudan 
2795a3437870SNishanth Aravamudan 	hugetlb_init_hstates();
2796aa888a74SAndi Kleen 	gather_bootmem_prealloc();
2797a3437870SNishanth Aravamudan 	report_hugepages();
2798a3437870SNishanth Aravamudan 
2799a3437870SNishanth Aravamudan 	hugetlb_sysfs_init();
28009a305230SLee Schermerhorn 	hugetlb_register_all_nodes();
28017179e7bfSJianguo Wu 	hugetlb_cgroup_file_init();
28029a305230SLee Schermerhorn 
28038382d914SDavidlohr Bueso #ifdef CONFIG_SMP
28048382d914SDavidlohr Bueso 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
28058382d914SDavidlohr Bueso #else
28068382d914SDavidlohr Bueso 	num_fault_mutexes = 1;
28078382d914SDavidlohr Bueso #endif
2808c672c7f2SMike Kravetz 	hugetlb_fault_mutex_table =
28098382d914SDavidlohr Bueso 		kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2810c672c7f2SMike Kravetz 	BUG_ON(!hugetlb_fault_mutex_table);
28118382d914SDavidlohr Bueso 
28128382d914SDavidlohr Bueso 	for (i = 0; i < num_fault_mutexes; i++)
2813c672c7f2SMike Kravetz 		mutex_init(&hugetlb_fault_mutex_table[i]);
2814a3437870SNishanth Aravamudan 	return 0;
2815a3437870SNishanth Aravamudan }
28163e89e1c5SPaul Gortmaker subsys_initcall(hugetlb_init);
2817a3437870SNishanth Aravamudan 
2818a3437870SNishanth Aravamudan /* Should be called on processing a hugepagesz=... option */
28199fee021dSVaishali Thakkar void __init hugetlb_bad_size(void)
28209fee021dSVaishali Thakkar {
28219fee021dSVaishali Thakkar 	parsed_valid_hugepagesz = false;
28229fee021dSVaishali Thakkar }
28239fee021dSVaishali Thakkar 
2824d00181b9SKirill A. Shutemov void __init hugetlb_add_hstate(unsigned int order)
2825a3437870SNishanth Aravamudan {
2826a3437870SNishanth Aravamudan 	struct hstate *h;
28278faa8b07SAndi Kleen 	unsigned long i;
28288faa8b07SAndi Kleen 
2829a3437870SNishanth Aravamudan 	if (size_to_hstate(PAGE_SIZE << order)) {
2830598d8091SJoe Perches 		pr_warn("hugepagesz= specified twice, ignoring\n");
2831a3437870SNishanth Aravamudan 		return;
2832a3437870SNishanth Aravamudan 	}
283347d38344SAneesh Kumar K.V 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2834a3437870SNishanth Aravamudan 	BUG_ON(order == 0);
283547d38344SAneesh Kumar K.V 	h = &hstates[hugetlb_max_hstate++];
2836a3437870SNishanth Aravamudan 	h->order = order;
2837a3437870SNishanth Aravamudan 	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
28388faa8b07SAndi Kleen 	h->nr_huge_pages = 0;
28398faa8b07SAndi Kleen 	h->free_huge_pages = 0;
28408faa8b07SAndi Kleen 	for (i = 0; i < MAX_NUMNODES; ++i)
28418faa8b07SAndi Kleen 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
28420edaecfaSAneesh Kumar K.V 	INIT_LIST_HEAD(&h->hugepage_activelist);
284354f18d35SAndrew Morton 	h->next_nid_to_alloc = first_memory_node;
284454f18d35SAndrew Morton 	h->next_nid_to_free = first_memory_node;
2845a3437870SNishanth Aravamudan 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2846a3437870SNishanth Aravamudan 					huge_page_size(h)/1024);
28478faa8b07SAndi Kleen 
2848a3437870SNishanth Aravamudan 	parsed_hstate = h;
2849a3437870SNishanth Aravamudan }
2850a3437870SNishanth Aravamudan 
2851e11bfbfcSNick Piggin static int __init hugetlb_nrpages_setup(char *s)
2852a3437870SNishanth Aravamudan {
2853a3437870SNishanth Aravamudan 	unsigned long *mhp;
28548faa8b07SAndi Kleen 	static unsigned long *last_mhp;
2855a3437870SNishanth Aravamudan 
28569fee021dSVaishali Thakkar 	if (!parsed_valid_hugepagesz) {
28579fee021dSVaishali Thakkar 		pr_warn("hugepages = %s preceded by "
28589fee021dSVaishali Thakkar 			"an unsupported hugepagesz, ignoring\n", s);
28599fee021dSVaishali Thakkar 		parsed_valid_hugepagesz = true;
28609fee021dSVaishali Thakkar 		return 1;
28619fee021dSVaishali Thakkar 	}
2862a3437870SNishanth Aravamudan 	/*
286347d38344SAneesh Kumar K.V 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2864a3437870SNishanth Aravamudan 	 * so this hugepages= parameter goes to the "default hstate".
2865a3437870SNishanth Aravamudan 	 */
28669fee021dSVaishali Thakkar 	else if (!hugetlb_max_hstate)
2867a3437870SNishanth Aravamudan 		mhp = &default_hstate_max_huge_pages;
2868a3437870SNishanth Aravamudan 	else
2869a3437870SNishanth Aravamudan 		mhp = &parsed_hstate->max_huge_pages;
2870a3437870SNishanth Aravamudan 
28718faa8b07SAndi Kleen 	if (mhp == last_mhp) {
2872598d8091SJoe Perches 		pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
28738faa8b07SAndi Kleen 		return 1;
28748faa8b07SAndi Kleen 	}
28758faa8b07SAndi Kleen 
2876a3437870SNishanth Aravamudan 	if (sscanf(s, "%lu", mhp) <= 0)
2877a3437870SNishanth Aravamudan 		*mhp = 0;
2878a3437870SNishanth Aravamudan 
28798faa8b07SAndi Kleen 	/*
28808faa8b07SAndi Kleen 	 * Global state is always initialized later in hugetlb_init.
28818faa8b07SAndi Kleen 	 * But we need to allocate >= MAX_ORDER hstates here early to still
28828faa8b07SAndi Kleen 	 * use the bootmem allocator.
28838faa8b07SAndi Kleen 	 */
288447d38344SAneesh Kumar K.V 	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
28858faa8b07SAndi Kleen 		hugetlb_hstate_alloc_pages(parsed_hstate);
28868faa8b07SAndi Kleen 
28878faa8b07SAndi Kleen 	last_mhp = mhp;
28888faa8b07SAndi Kleen 
2889a3437870SNishanth Aravamudan 	return 1;
2890a3437870SNishanth Aravamudan }
2891e11bfbfcSNick Piggin __setup("hugepages=", hugetlb_nrpages_setup);
2892e11bfbfcSNick Piggin 
2893e11bfbfcSNick Piggin static int __init hugetlb_default_setup(char *s)
2894e11bfbfcSNick Piggin {
2895e11bfbfcSNick Piggin 	default_hstate_size = memparse(s, &s);
2896e11bfbfcSNick Piggin 	return 1;
2897e11bfbfcSNick Piggin }
2898e11bfbfcSNick Piggin __setup("default_hugepagesz=", hugetlb_default_setup);
2899a3437870SNishanth Aravamudan 
29008a213460SNishanth Aravamudan static unsigned int cpuset_mems_nr(unsigned int *array)
29018a213460SNishanth Aravamudan {
29028a213460SNishanth Aravamudan 	int node;
29038a213460SNishanth Aravamudan 	unsigned int nr = 0;
29048a213460SNishanth Aravamudan 
29058a213460SNishanth Aravamudan 	for_each_node_mask(node, cpuset_current_mems_allowed)
29068a213460SNishanth Aravamudan 		nr += array[node];
29078a213460SNishanth Aravamudan 
29088a213460SNishanth Aravamudan 	return nr;
29098a213460SNishanth Aravamudan }
29108a213460SNishanth Aravamudan 
29118a213460SNishanth Aravamudan #ifdef CONFIG_SYSCTL
291206808b08SLee Schermerhorn static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
291306808b08SLee Schermerhorn 			 struct ctl_table *table, int write,
291406808b08SLee Schermerhorn 			 void __user *buffer, size_t *length, loff_t *ppos)
29151da177e4SLinus Torvalds {
2916e5ff2159SAndi Kleen 	struct hstate *h = &default_hstate;
2917238d3c13SDavid Rientjes 	unsigned long tmp = h->max_huge_pages;
291808d4a246SMichal Hocko 	int ret;
2919e5ff2159SAndi Kleen 
2920457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
292186613628SJan Stancek 		return -EOPNOTSUPP;
2922457c1b27SNishanth Aravamudan 
2923e5ff2159SAndi Kleen 	table->data = &tmp;
2924e5ff2159SAndi Kleen 	table->maxlen = sizeof(unsigned long);
292508d4a246SMichal Hocko 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
292608d4a246SMichal Hocko 	if (ret)
292708d4a246SMichal Hocko 		goto out;
2928e5ff2159SAndi Kleen 
2929238d3c13SDavid Rientjes 	if (write)
2930238d3c13SDavid Rientjes 		ret = __nr_hugepages_store_common(obey_mempolicy, h,
2931238d3c13SDavid Rientjes 						  NUMA_NO_NODE, tmp, *length);
293208d4a246SMichal Hocko out:
293308d4a246SMichal Hocko 	return ret;
29341da177e4SLinus Torvalds }
2935396faf03SMel Gorman 
293606808b08SLee Schermerhorn int hugetlb_sysctl_handler(struct ctl_table *table, int write,
293706808b08SLee Schermerhorn 			  void __user *buffer, size_t *length, loff_t *ppos)
293806808b08SLee Schermerhorn {
293906808b08SLee Schermerhorn 
294006808b08SLee Schermerhorn 	return hugetlb_sysctl_handler_common(false, table, write,
294106808b08SLee Schermerhorn 							buffer, length, ppos);
294206808b08SLee Schermerhorn }
294306808b08SLee Schermerhorn 
294406808b08SLee Schermerhorn #ifdef CONFIG_NUMA
294506808b08SLee Schermerhorn int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
294606808b08SLee Schermerhorn 			  void __user *buffer, size_t *length, loff_t *ppos)
294706808b08SLee Schermerhorn {
294806808b08SLee Schermerhorn 	return hugetlb_sysctl_handler_common(true, table, write,
294906808b08SLee Schermerhorn 							buffer, length, ppos);
295006808b08SLee Schermerhorn }
295106808b08SLee Schermerhorn #endif /* CONFIG_NUMA */
295206808b08SLee Schermerhorn 
2953a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write,
29548d65af78SAlexey Dobriyan 			void __user *buffer,
2955a3d0c6aaSNishanth Aravamudan 			size_t *length, loff_t *ppos)
2956a3d0c6aaSNishanth Aravamudan {
2957a5516438SAndi Kleen 	struct hstate *h = &default_hstate;
2958e5ff2159SAndi Kleen 	unsigned long tmp;
295908d4a246SMichal Hocko 	int ret;
2960e5ff2159SAndi Kleen 
2961457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
296286613628SJan Stancek 		return -EOPNOTSUPP;
2963457c1b27SNishanth Aravamudan 
2964e5ff2159SAndi Kleen 	tmp = h->nr_overcommit_huge_pages;
2965e5ff2159SAndi Kleen 
2966bae7f4aeSLuiz Capitulino 	if (write && hstate_is_gigantic(h))
2967adbe8726SEric B Munson 		return -EINVAL;
2968adbe8726SEric B Munson 
2969e5ff2159SAndi Kleen 	table->data = &tmp;
2970e5ff2159SAndi Kleen 	table->maxlen = sizeof(unsigned long);
297108d4a246SMichal Hocko 	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
297208d4a246SMichal Hocko 	if (ret)
297308d4a246SMichal Hocko 		goto out;
2974e5ff2159SAndi Kleen 
2975e5ff2159SAndi Kleen 	if (write) {
2976064d9efeSNishanth Aravamudan 		spin_lock(&hugetlb_lock);
2977e5ff2159SAndi Kleen 		h->nr_overcommit_huge_pages = tmp;
2978a3d0c6aaSNishanth Aravamudan 		spin_unlock(&hugetlb_lock);
2979e5ff2159SAndi Kleen 	}
298008d4a246SMichal Hocko out:
298108d4a246SMichal Hocko 	return ret;
2982a3d0c6aaSNishanth Aravamudan }
2983a3d0c6aaSNishanth Aravamudan 
29841da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */
29851da177e4SLinus Torvalds 
2986e1759c21SAlexey Dobriyan void hugetlb_report_meminfo(struct seq_file *m)
29871da177e4SLinus Torvalds {
2988a5516438SAndi Kleen 	struct hstate *h = &default_hstate;
2989457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
2990457c1b27SNishanth Aravamudan 		return;
2991e1759c21SAlexey Dobriyan 	seq_printf(m,
29921da177e4SLinus Torvalds 			"HugePages_Total:   %5lu\n"
29931da177e4SLinus Torvalds 			"HugePages_Free:    %5lu\n"
2994b45b5bd6SDavid Gibson 			"HugePages_Rsvd:    %5lu\n"
29957893d1d5SAdam Litke 			"HugePages_Surp:    %5lu\n"
29964f98a2feSRik van Riel 			"Hugepagesize:   %8lu kB\n",
2997a5516438SAndi Kleen 			h->nr_huge_pages,
2998a5516438SAndi Kleen 			h->free_huge_pages,
2999a5516438SAndi Kleen 			h->resv_huge_pages,
3000a5516438SAndi Kleen 			h->surplus_huge_pages,
3001a5516438SAndi Kleen 			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
30021da177e4SLinus Torvalds }
30031da177e4SLinus Torvalds 
30041da177e4SLinus Torvalds int hugetlb_report_node_meminfo(int nid, char *buf)
30051da177e4SLinus Torvalds {
3006a5516438SAndi Kleen 	struct hstate *h = &default_hstate;
3007457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
3008457c1b27SNishanth Aravamudan 		return 0;
30091da177e4SLinus Torvalds 	return sprintf(buf,
30101da177e4SLinus Torvalds 		"Node %d HugePages_Total: %5u\n"
3011a1de0919SNishanth Aravamudan 		"Node %d HugePages_Free:  %5u\n"
3012a1de0919SNishanth Aravamudan 		"Node %d HugePages_Surp:  %5u\n",
3013a5516438SAndi Kleen 		nid, h->nr_huge_pages_node[nid],
3014a5516438SAndi Kleen 		nid, h->free_huge_pages_node[nid],
3015a5516438SAndi Kleen 		nid, h->surplus_huge_pages_node[nid]);
30161da177e4SLinus Torvalds }
30171da177e4SLinus Torvalds 
3018949f7ec5SDavid Rientjes void hugetlb_show_meminfo(void)
3019949f7ec5SDavid Rientjes {
3020949f7ec5SDavid Rientjes 	struct hstate *h;
3021949f7ec5SDavid Rientjes 	int nid;
3022949f7ec5SDavid Rientjes 
3023457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
3024457c1b27SNishanth Aravamudan 		return;
3025457c1b27SNishanth Aravamudan 
3026949f7ec5SDavid Rientjes 	for_each_node_state(nid, N_MEMORY)
3027949f7ec5SDavid Rientjes 		for_each_hstate(h)
3028949f7ec5SDavid Rientjes 			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3029949f7ec5SDavid Rientjes 				nid,
3030949f7ec5SDavid Rientjes 				h->nr_huge_pages_node[nid],
3031949f7ec5SDavid Rientjes 				h->free_huge_pages_node[nid],
3032949f7ec5SDavid Rientjes 				h->surplus_huge_pages_node[nid],
3033949f7ec5SDavid Rientjes 				1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3034949f7ec5SDavid Rientjes }
3035949f7ec5SDavid Rientjes 
30365d317b2bSNaoya Horiguchi void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
30375d317b2bSNaoya Horiguchi {
30385d317b2bSNaoya Horiguchi 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
30395d317b2bSNaoya Horiguchi 		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
30405d317b2bSNaoya Horiguchi }
30415d317b2bSNaoya Horiguchi 
30421da177e4SLinus Torvalds /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
30431da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void)
30441da177e4SLinus Torvalds {
3045d0028588SWanpeng Li 	struct hstate *h;
3046d0028588SWanpeng Li 	unsigned long nr_total_pages = 0;
3047d0028588SWanpeng Li 
3048d0028588SWanpeng Li 	for_each_hstate(h)
3049d0028588SWanpeng Li 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3050d0028588SWanpeng Li 	return nr_total_pages;
30511da177e4SLinus Torvalds }
30521da177e4SLinus Torvalds 
3053a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta)
3054fc1b8a73SMel Gorman {
3055fc1b8a73SMel Gorman 	int ret = -ENOMEM;
3056fc1b8a73SMel Gorman 
3057fc1b8a73SMel Gorman 	spin_lock(&hugetlb_lock);
3058fc1b8a73SMel Gorman 	/*
3059fc1b8a73SMel Gorman 	 * When cpuset is configured, it breaks the strict hugetlb page
3060fc1b8a73SMel Gorman 	 * reservation as the accounting is done on a global variable. Such
3061fc1b8a73SMel Gorman 	 * reservation is completely rubbish in the presence of cpuset because
3062fc1b8a73SMel Gorman 	 * the reservation is not checked against page availability for the
3063fc1b8a73SMel Gorman 	 * current cpuset. Application can still potentially OOM'ed by kernel
3064fc1b8a73SMel Gorman 	 * with lack of free htlb page in cpuset that the task is in.
3065fc1b8a73SMel Gorman 	 * Attempt to enforce strict accounting with cpuset is almost
3066fc1b8a73SMel Gorman 	 * impossible (or too ugly) because cpuset is too fluid that
3067fc1b8a73SMel Gorman 	 * task or memory node can be dynamically moved between cpusets.
3068fc1b8a73SMel Gorman 	 *
3069fc1b8a73SMel Gorman 	 * The change of semantics for shared hugetlb mapping with cpuset is
3070fc1b8a73SMel Gorman 	 * undesirable. However, in order to preserve some of the semantics,
3071fc1b8a73SMel Gorman 	 * we fall back to check against current free page availability as
3072fc1b8a73SMel Gorman 	 * a best attempt and hopefully to minimize the impact of changing
3073fc1b8a73SMel Gorman 	 * semantics that cpuset has.
3074fc1b8a73SMel Gorman 	 */
3075fc1b8a73SMel Gorman 	if (delta > 0) {
3076a5516438SAndi Kleen 		if (gather_surplus_pages(h, delta) < 0)
3077fc1b8a73SMel Gorman 			goto out;
3078fc1b8a73SMel Gorman 
3079a5516438SAndi Kleen 		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3080a5516438SAndi Kleen 			return_unused_surplus_pages(h, delta);
3081fc1b8a73SMel Gorman 			goto out;
3082fc1b8a73SMel Gorman 		}
3083fc1b8a73SMel Gorman 	}
3084fc1b8a73SMel Gorman 
3085fc1b8a73SMel Gorman 	ret = 0;
3086fc1b8a73SMel Gorman 	if (delta < 0)
3087a5516438SAndi Kleen 		return_unused_surplus_pages(h, (unsigned long) -delta);
3088fc1b8a73SMel Gorman 
3089fc1b8a73SMel Gorman out:
3090fc1b8a73SMel Gorman 	spin_unlock(&hugetlb_lock);
3091fc1b8a73SMel Gorman 	return ret;
3092fc1b8a73SMel Gorman }
3093fc1b8a73SMel Gorman 
309484afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma)
309584afd99bSAndy Whitcroft {
3096f522c3acSJoonsoo Kim 	struct resv_map *resv = vma_resv_map(vma);
309784afd99bSAndy Whitcroft 
309884afd99bSAndy Whitcroft 	/*
309984afd99bSAndy Whitcroft 	 * This new VMA should share its siblings reservation map if present.
310084afd99bSAndy Whitcroft 	 * The VMA will only ever have a valid reservation map pointer where
310184afd99bSAndy Whitcroft 	 * it is being copied for another still existing VMA.  As that VMA
310225985edcSLucas De Marchi 	 * has a reference to the reservation map it cannot disappear until
310384afd99bSAndy Whitcroft 	 * after this open call completes.  It is therefore safe to take a
310484afd99bSAndy Whitcroft 	 * new reference here without additional locking.
310584afd99bSAndy Whitcroft 	 */
31064e35f483SJoonsoo Kim 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3107f522c3acSJoonsoo Kim 		kref_get(&resv->refs);
310884afd99bSAndy Whitcroft }
310984afd99bSAndy Whitcroft 
3110a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3111a1e78772SMel Gorman {
3112a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
3113f522c3acSJoonsoo Kim 	struct resv_map *resv = vma_resv_map(vma);
311490481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_vma(vma);
31154e35f483SJoonsoo Kim 	unsigned long reserve, start, end;
31161c5ecae3SMike Kravetz 	long gbl_reserve;
311784afd99bSAndy Whitcroft 
31184e35f483SJoonsoo Kim 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
31194e35f483SJoonsoo Kim 		return;
31204e35f483SJoonsoo Kim 
3121a5516438SAndi Kleen 	start = vma_hugecache_offset(h, vma, vma->vm_start);
3122a5516438SAndi Kleen 	end = vma_hugecache_offset(h, vma, vma->vm_end);
312384afd99bSAndy Whitcroft 
31244e35f483SJoonsoo Kim 	reserve = (end - start) - region_count(resv, start, end);
312584afd99bSAndy Whitcroft 
3126f031dd27SJoonsoo Kim 	kref_put(&resv->refs, resv_map_release);
312784afd99bSAndy Whitcroft 
31287251ff78SAdam Litke 	if (reserve) {
31291c5ecae3SMike Kravetz 		/*
31301c5ecae3SMike Kravetz 		 * Decrement reserve counts.  The global reserve count may be
31311c5ecae3SMike Kravetz 		 * adjusted if the subpool has a minimum size.
31321c5ecae3SMike Kravetz 		 */
31331c5ecae3SMike Kravetz 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
31341c5ecae3SMike Kravetz 		hugetlb_acct_memory(h, -gbl_reserve);
31357251ff78SAdam Litke 	}
3136a1e78772SMel Gorman }
3137a1e78772SMel Gorman 
31381da177e4SLinus Torvalds /*
31391da177e4SLinus Torvalds  * We cannot handle pagefaults against hugetlb pages at all.  They cause
31401da177e4SLinus Torvalds  * handle_mm_fault() to try to instantiate regular-sized pages in the
31411da177e4SLinus Torvalds  * hugegpage VMA.  do_page_fault() is supposed to trap this, so BUG is we get
31421da177e4SLinus Torvalds  * this far.
31431da177e4SLinus Torvalds  */
3144d0217ac0SNick Piggin static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
31451da177e4SLinus Torvalds {
31461da177e4SLinus Torvalds 	BUG();
3147d0217ac0SNick Piggin 	return 0;
31481da177e4SLinus Torvalds }
31491da177e4SLinus Torvalds 
3150f0f37e2fSAlexey Dobriyan const struct vm_operations_struct hugetlb_vm_ops = {
3151d0217ac0SNick Piggin 	.fault = hugetlb_vm_op_fault,
315284afd99bSAndy Whitcroft 	.open = hugetlb_vm_op_open,
3153a1e78772SMel Gorman 	.close = hugetlb_vm_op_close,
31541da177e4SLinus Torvalds };
31551da177e4SLinus Torvalds 
31561e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
31571e8f889bSDavid Gibson 				int writable)
315863551ae0SDavid Gibson {
315963551ae0SDavid Gibson 	pte_t entry;
316063551ae0SDavid Gibson 
31611e8f889bSDavid Gibson 	if (writable) {
3162106c992aSGerald Schaefer 		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3163106c992aSGerald Schaefer 					 vma->vm_page_prot)));
316463551ae0SDavid Gibson 	} else {
3165106c992aSGerald Schaefer 		entry = huge_pte_wrprotect(mk_huge_pte(page,
3166106c992aSGerald Schaefer 					   vma->vm_page_prot));
316763551ae0SDavid Gibson 	}
316863551ae0SDavid Gibson 	entry = pte_mkyoung(entry);
316963551ae0SDavid Gibson 	entry = pte_mkhuge(entry);
3170d9ed9faaSChris Metcalf 	entry = arch_make_huge_pte(entry, vma, page, writable);
317163551ae0SDavid Gibson 
317263551ae0SDavid Gibson 	return entry;
317363551ae0SDavid Gibson }
317463551ae0SDavid Gibson 
31751e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma,
31761e8f889bSDavid Gibson 				   unsigned long address, pte_t *ptep)
31771e8f889bSDavid Gibson {
31781e8f889bSDavid Gibson 	pte_t entry;
31791e8f889bSDavid Gibson 
3180106c992aSGerald Schaefer 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
318132f84528SChris Forbes 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
31824b3073e1SRussell King 		update_mmu_cache(vma, address, ptep);
31831e8f889bSDavid Gibson }
31841e8f889bSDavid Gibson 
31854a705fefSNaoya Horiguchi static int is_hugetlb_entry_migration(pte_t pte)
31864a705fefSNaoya Horiguchi {
31874a705fefSNaoya Horiguchi 	swp_entry_t swp;
31884a705fefSNaoya Horiguchi 
31894a705fefSNaoya Horiguchi 	if (huge_pte_none(pte) || pte_present(pte))
31904a705fefSNaoya Horiguchi 		return 0;
31914a705fefSNaoya Horiguchi 	swp = pte_to_swp_entry(pte);
31924a705fefSNaoya Horiguchi 	if (non_swap_entry(swp) && is_migration_entry(swp))
31934a705fefSNaoya Horiguchi 		return 1;
31944a705fefSNaoya Horiguchi 	else
31954a705fefSNaoya Horiguchi 		return 0;
31964a705fefSNaoya Horiguchi }
31974a705fefSNaoya Horiguchi 
31984a705fefSNaoya Horiguchi static int is_hugetlb_entry_hwpoisoned(pte_t pte)
31994a705fefSNaoya Horiguchi {
32004a705fefSNaoya Horiguchi 	swp_entry_t swp;
32014a705fefSNaoya Horiguchi 
32024a705fefSNaoya Horiguchi 	if (huge_pte_none(pte) || pte_present(pte))
32034a705fefSNaoya Horiguchi 		return 0;
32044a705fefSNaoya Horiguchi 	swp = pte_to_swp_entry(pte);
32054a705fefSNaoya Horiguchi 	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
32064a705fefSNaoya Horiguchi 		return 1;
32074a705fefSNaoya Horiguchi 	else
32084a705fefSNaoya Horiguchi 		return 0;
32094a705fefSNaoya Horiguchi }
32101e8f889bSDavid Gibson 
321163551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
321263551ae0SDavid Gibson 			    struct vm_area_struct *vma)
321363551ae0SDavid Gibson {
321463551ae0SDavid Gibson 	pte_t *src_pte, *dst_pte, entry;
321563551ae0SDavid Gibson 	struct page *ptepage;
32161c59827dSHugh Dickins 	unsigned long addr;
32171e8f889bSDavid Gibson 	int cow;
3218a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
3219a5516438SAndi Kleen 	unsigned long sz = huge_page_size(h);
3220e8569dd2SAndreas Sandberg 	unsigned long mmun_start;	/* For mmu_notifiers */
3221e8569dd2SAndreas Sandberg 	unsigned long mmun_end;		/* For mmu_notifiers */
3222e8569dd2SAndreas Sandberg 	int ret = 0;
32231e8f889bSDavid Gibson 
32241e8f889bSDavid Gibson 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
322563551ae0SDavid Gibson 
3226e8569dd2SAndreas Sandberg 	mmun_start = vma->vm_start;
3227e8569dd2SAndreas Sandberg 	mmun_end = vma->vm_end;
3228e8569dd2SAndreas Sandberg 	if (cow)
3229e8569dd2SAndreas Sandberg 		mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3230e8569dd2SAndreas Sandberg 
3231a5516438SAndi Kleen 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3232cb900f41SKirill A. Shutemov 		spinlock_t *src_ptl, *dst_ptl;
3233c74df32cSHugh Dickins 		src_pte = huge_pte_offset(src, addr);
3234c74df32cSHugh Dickins 		if (!src_pte)
3235c74df32cSHugh Dickins 			continue;
3236a5516438SAndi Kleen 		dst_pte = huge_pte_alloc(dst, addr, sz);
3237e8569dd2SAndreas Sandberg 		if (!dst_pte) {
3238e8569dd2SAndreas Sandberg 			ret = -ENOMEM;
3239e8569dd2SAndreas Sandberg 			break;
3240e8569dd2SAndreas Sandberg 		}
3241c5c99429SLarry Woodman 
3242c5c99429SLarry Woodman 		/* If the pagetables are shared don't copy or take references */
3243c5c99429SLarry Woodman 		if (dst_pte == src_pte)
3244c5c99429SLarry Woodman 			continue;
3245c5c99429SLarry Woodman 
3246cb900f41SKirill A. Shutemov 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
3247cb900f41SKirill A. Shutemov 		src_ptl = huge_pte_lockptr(h, src, src_pte);
3248cb900f41SKirill A. Shutemov 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
32494a705fefSNaoya Horiguchi 		entry = huge_ptep_get(src_pte);
32504a705fefSNaoya Horiguchi 		if (huge_pte_none(entry)) { /* skip none entry */
32514a705fefSNaoya Horiguchi 			;
32524a705fefSNaoya Horiguchi 		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
32534a705fefSNaoya Horiguchi 				    is_hugetlb_entry_hwpoisoned(entry))) {
32544a705fefSNaoya Horiguchi 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
32554a705fefSNaoya Horiguchi 
32564a705fefSNaoya Horiguchi 			if (is_write_migration_entry(swp_entry) && cow) {
32574a705fefSNaoya Horiguchi 				/*
32584a705fefSNaoya Horiguchi 				 * COW mappings require pages in both
32594a705fefSNaoya Horiguchi 				 * parent and child to be set to read.
32604a705fefSNaoya Horiguchi 				 */
32614a705fefSNaoya Horiguchi 				make_migration_entry_read(&swp_entry);
32624a705fefSNaoya Horiguchi 				entry = swp_entry_to_pte(swp_entry);
32634a705fefSNaoya Horiguchi 				set_huge_pte_at(src, addr, src_pte, entry);
32644a705fefSNaoya Horiguchi 			}
32654a705fefSNaoya Horiguchi 			set_huge_pte_at(dst, addr, dst_pte, entry);
32664a705fefSNaoya Horiguchi 		} else {
326734ee645eSJoerg Roedel 			if (cow) {
32687f2e9525SGerald Schaefer 				huge_ptep_set_wrprotect(src, addr, src_pte);
326934ee645eSJoerg Roedel 				mmu_notifier_invalidate_range(src, mmun_start,
327034ee645eSJoerg Roedel 								   mmun_end);
327134ee645eSJoerg Roedel 			}
32720253d634SNaoya Horiguchi 			entry = huge_ptep_get(src_pte);
327363551ae0SDavid Gibson 			ptepage = pte_page(entry);
327463551ae0SDavid Gibson 			get_page(ptepage);
327553f9263bSKirill A. Shutemov 			page_dup_rmap(ptepage, true);
327663551ae0SDavid Gibson 			set_huge_pte_at(dst, addr, dst_pte, entry);
32775d317b2bSNaoya Horiguchi 			hugetlb_count_add(pages_per_huge_page(h), dst);
32781c59827dSHugh Dickins 		}
3279cb900f41SKirill A. Shutemov 		spin_unlock(src_ptl);
3280cb900f41SKirill A. Shutemov 		spin_unlock(dst_ptl);
328163551ae0SDavid Gibson 	}
328263551ae0SDavid Gibson 
3283e8569dd2SAndreas Sandberg 	if (cow)
3284e8569dd2SAndreas Sandberg 		mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3285e8569dd2SAndreas Sandberg 
3286e8569dd2SAndreas Sandberg 	return ret;
328763551ae0SDavid Gibson }
328863551ae0SDavid Gibson 
328924669e58SAneesh Kumar K.V void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
329024669e58SAneesh Kumar K.V 			    unsigned long start, unsigned long end,
329124669e58SAneesh Kumar K.V 			    struct page *ref_page)
329263551ae0SDavid Gibson {
329363551ae0SDavid Gibson 	struct mm_struct *mm = vma->vm_mm;
329463551ae0SDavid Gibson 	unsigned long address;
3295c7546f8fSDavid Gibson 	pte_t *ptep;
329663551ae0SDavid Gibson 	pte_t pte;
3297cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
329863551ae0SDavid Gibson 	struct page *page;
3299a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
3300a5516438SAndi Kleen 	unsigned long sz = huge_page_size(h);
33012ec74c3eSSagi Grimberg 	const unsigned long mmun_start = start;	/* For mmu_notifiers */
33022ec74c3eSSagi Grimberg 	const unsigned long mmun_end   = end;	/* For mmu_notifiers */
3303a5516438SAndi Kleen 
330463551ae0SDavid Gibson 	WARN_ON(!is_vm_hugetlb_page(vma));
3305a5516438SAndi Kleen 	BUG_ON(start & ~huge_page_mask(h));
3306a5516438SAndi Kleen 	BUG_ON(end & ~huge_page_mask(h));
330763551ae0SDavid Gibson 
330807e32661SAneesh Kumar K.V 	/*
330907e32661SAneesh Kumar K.V 	 * This is a hugetlb vma, all the pte entries should point
331007e32661SAneesh Kumar K.V 	 * to huge page.
331107e32661SAneesh Kumar K.V 	 */
331207e32661SAneesh Kumar K.V 	tlb_remove_check_page_size_change(tlb, sz);
331324669e58SAneesh Kumar K.V 	tlb_start_vma(tlb, vma);
33142ec74c3eSSagi Grimberg 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3315569f48b8SHillf Danton 	address = start;
3316569f48b8SHillf Danton 	for (; address < end; address += sz) {
3317c7546f8fSDavid Gibson 		ptep = huge_pte_offset(mm, address);
3318c7546f8fSDavid Gibson 		if (!ptep)
3319c7546f8fSDavid Gibson 			continue;
3320c7546f8fSDavid Gibson 
3321cb900f41SKirill A. Shutemov 		ptl = huge_pte_lock(h, mm, ptep);
332231d49da5SAneesh Kumar K.V 		if (huge_pmd_unshare(mm, &address, ptep)) {
332331d49da5SAneesh Kumar K.V 			spin_unlock(ptl);
332431d49da5SAneesh Kumar K.V 			continue;
332531d49da5SAneesh Kumar K.V 		}
332639dde65cSChen, Kenneth W 
33276629326bSHillf Danton 		pte = huge_ptep_get(ptep);
332831d49da5SAneesh Kumar K.V 		if (huge_pte_none(pte)) {
332931d49da5SAneesh Kumar K.V 			spin_unlock(ptl);
333031d49da5SAneesh Kumar K.V 			continue;
333131d49da5SAneesh Kumar K.V 		}
33326629326bSHillf Danton 
33336629326bSHillf Danton 		/*
33349fbc1f63SNaoya Horiguchi 		 * Migrating hugepage or HWPoisoned hugepage is already
33359fbc1f63SNaoya Horiguchi 		 * unmapped and its refcount is dropped, so just clear pte here.
33366629326bSHillf Danton 		 */
33379fbc1f63SNaoya Horiguchi 		if (unlikely(!pte_present(pte))) {
3338106c992aSGerald Schaefer 			huge_pte_clear(mm, address, ptep);
333931d49da5SAneesh Kumar K.V 			spin_unlock(ptl);
334031d49da5SAneesh Kumar K.V 			continue;
33418c4894c6SNaoya Horiguchi 		}
33426629326bSHillf Danton 
33436629326bSHillf Danton 		page = pte_page(pte);
334404f2cbe3SMel Gorman 		/*
334504f2cbe3SMel Gorman 		 * If a reference page is supplied, it is because a specific
334604f2cbe3SMel Gorman 		 * page is being unmapped, not a range. Ensure the page we
334704f2cbe3SMel Gorman 		 * are about to unmap is the actual page of interest.
334804f2cbe3SMel Gorman 		 */
334904f2cbe3SMel Gorman 		if (ref_page) {
335031d49da5SAneesh Kumar K.V 			if (page != ref_page) {
335131d49da5SAneesh Kumar K.V 				spin_unlock(ptl);
335231d49da5SAneesh Kumar K.V 				continue;
335331d49da5SAneesh Kumar K.V 			}
335404f2cbe3SMel Gorman 			/*
335504f2cbe3SMel Gorman 			 * Mark the VMA as having unmapped its page so that
335604f2cbe3SMel Gorman 			 * future faults in this VMA will fail rather than
335704f2cbe3SMel Gorman 			 * looking like data was lost
335804f2cbe3SMel Gorman 			 */
335904f2cbe3SMel Gorman 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
336004f2cbe3SMel Gorman 		}
336104f2cbe3SMel Gorman 
3362c7546f8fSDavid Gibson 		pte = huge_ptep_get_and_clear(mm, address, ptep);
3363b528e4b6SAneesh Kumar K.V 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3364106c992aSGerald Schaefer 		if (huge_pte_dirty(pte))
33656649a386SKen Chen 			set_page_dirty(page);
33669e81130bSHillf Danton 
33675d317b2bSNaoya Horiguchi 		hugetlb_count_sub(pages_per_huge_page(h), mm);
3368d281ee61SKirill A. Shutemov 		page_remove_rmap(page, true);
336931d49da5SAneesh Kumar K.V 
3370cb900f41SKirill A. Shutemov 		spin_unlock(ptl);
3371e77b0852SAneesh Kumar K.V 		tlb_remove_page_size(tlb, page, huge_page_size(h));
337224669e58SAneesh Kumar K.V 		/*
337331d49da5SAneesh Kumar K.V 		 * Bail out after unmapping reference page if supplied
337424669e58SAneesh Kumar K.V 		 */
337531d49da5SAneesh Kumar K.V 		if (ref_page)
337631d49da5SAneesh Kumar K.V 			break;
3377fe1668aeSChen, Kenneth W 	}
33782ec74c3eSSagi Grimberg 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
337924669e58SAneesh Kumar K.V 	tlb_end_vma(tlb, vma);
33801da177e4SLinus Torvalds }
338163551ae0SDavid Gibson 
3382d833352aSMel Gorman void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3383d833352aSMel Gorman 			  struct vm_area_struct *vma, unsigned long start,
3384d833352aSMel Gorman 			  unsigned long end, struct page *ref_page)
3385d833352aSMel Gorman {
3386d833352aSMel Gorman 	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
3387d833352aSMel Gorman 
3388d833352aSMel Gorman 	/*
3389d833352aSMel Gorman 	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
3390d833352aSMel Gorman 	 * test will fail on a vma being torn down, and not grab a page table
3391d833352aSMel Gorman 	 * on its way out.  We're lucky that the flag has such an appropriate
3392d833352aSMel Gorman 	 * name, and can in fact be safely cleared here. We could clear it
3393d833352aSMel Gorman 	 * before the __unmap_hugepage_range above, but all that's necessary
3394c8c06efaSDavidlohr Bueso 	 * is to clear it before releasing the i_mmap_rwsem. This works
3395d833352aSMel Gorman 	 * because in the context this is called, the VMA is about to be
3396c8c06efaSDavidlohr Bueso 	 * destroyed and the i_mmap_rwsem is held.
3397d833352aSMel Gorman 	 */
3398d833352aSMel Gorman 	vma->vm_flags &= ~VM_MAYSHARE;
3399d833352aSMel Gorman }
3400d833352aSMel Gorman 
3401502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
340204f2cbe3SMel Gorman 			  unsigned long end, struct page *ref_page)
3403502717f4SChen, Kenneth W {
340424669e58SAneesh Kumar K.V 	struct mm_struct *mm;
340524669e58SAneesh Kumar K.V 	struct mmu_gather tlb;
340624669e58SAneesh Kumar K.V 
340724669e58SAneesh Kumar K.V 	mm = vma->vm_mm;
340824669e58SAneesh Kumar K.V 
34092b047252SLinus Torvalds 	tlb_gather_mmu(&tlb, mm, start, end);
341024669e58SAneesh Kumar K.V 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
341124669e58SAneesh Kumar K.V 	tlb_finish_mmu(&tlb, start, end);
3412502717f4SChen, Kenneth W }
3413502717f4SChen, Kenneth W 
341404f2cbe3SMel Gorman /*
341504f2cbe3SMel Gorman  * This is called when the original mapper is failing to COW a MAP_PRIVATE
341604f2cbe3SMel Gorman  * mappping it owns the reserve page for. The intention is to unmap the page
341704f2cbe3SMel Gorman  * from other VMAs and let the children be SIGKILLed if they are faulting the
341804f2cbe3SMel Gorman  * same region.
341904f2cbe3SMel Gorman  */
34202f4612afSDavidlohr Bueso static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
34212a4b3dedSHarvey Harrison 			      struct page *page, unsigned long address)
342204f2cbe3SMel Gorman {
34237526674dSAdam Litke 	struct hstate *h = hstate_vma(vma);
342404f2cbe3SMel Gorman 	struct vm_area_struct *iter_vma;
342504f2cbe3SMel Gorman 	struct address_space *mapping;
342604f2cbe3SMel Gorman 	pgoff_t pgoff;
342704f2cbe3SMel Gorman 
342804f2cbe3SMel Gorman 	/*
342904f2cbe3SMel Gorman 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
343004f2cbe3SMel Gorman 	 * from page cache lookup which is in HPAGE_SIZE units.
343104f2cbe3SMel Gorman 	 */
34327526674dSAdam Litke 	address = address & huge_page_mask(h);
343336e4f20aSMichal Hocko 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
343436e4f20aSMichal Hocko 			vma->vm_pgoff;
343593c76a3dSAl Viro 	mapping = vma->vm_file->f_mapping;
343604f2cbe3SMel Gorman 
34374eb2b1dcSMel Gorman 	/*
34384eb2b1dcSMel Gorman 	 * Take the mapping lock for the duration of the table walk. As
34394eb2b1dcSMel Gorman 	 * this mapping should be shared between all the VMAs,
34404eb2b1dcSMel Gorman 	 * __unmap_hugepage_range() is called as the lock is already held
34414eb2b1dcSMel Gorman 	 */
344283cde9e8SDavidlohr Bueso 	i_mmap_lock_write(mapping);
34436b2dbba8SMichel Lespinasse 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
344404f2cbe3SMel Gorman 		/* Do not unmap the current VMA */
344504f2cbe3SMel Gorman 		if (iter_vma == vma)
344604f2cbe3SMel Gorman 			continue;
344704f2cbe3SMel Gorman 
344804f2cbe3SMel Gorman 		/*
34492f84a899SMel Gorman 		 * Shared VMAs have their own reserves and do not affect
34502f84a899SMel Gorman 		 * MAP_PRIVATE accounting but it is possible that a shared
34512f84a899SMel Gorman 		 * VMA is using the same page so check and skip such VMAs.
34522f84a899SMel Gorman 		 */
34532f84a899SMel Gorman 		if (iter_vma->vm_flags & VM_MAYSHARE)
34542f84a899SMel Gorman 			continue;
34552f84a899SMel Gorman 
34562f84a899SMel Gorman 		/*
345704f2cbe3SMel Gorman 		 * Unmap the page from other VMAs without their own reserves.
345804f2cbe3SMel Gorman 		 * They get marked to be SIGKILLed if they fault in these
345904f2cbe3SMel Gorman 		 * areas. This is because a future no-page fault on this VMA
346004f2cbe3SMel Gorman 		 * could insert a zeroed page instead of the data existing
346104f2cbe3SMel Gorman 		 * from the time of fork. This would look like data corruption
346204f2cbe3SMel Gorman 		 */
346304f2cbe3SMel Gorman 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
346424669e58SAneesh Kumar K.V 			unmap_hugepage_range(iter_vma, address,
346524669e58SAneesh Kumar K.V 					     address + huge_page_size(h), page);
346604f2cbe3SMel Gorman 	}
346783cde9e8SDavidlohr Bueso 	i_mmap_unlock_write(mapping);
346804f2cbe3SMel Gorman }
346904f2cbe3SMel Gorman 
34700fe6e20bSNaoya Horiguchi /*
34710fe6e20bSNaoya Horiguchi  * Hugetlb_cow() should be called with page lock of the original hugepage held.
3472ef009b25SMichal Hocko  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3473ef009b25SMichal Hocko  * cannot race with other handlers or page migration.
3474ef009b25SMichal Hocko  * Keep the pte_same checks anyway to make transition from the mutex easier.
34750fe6e20bSNaoya Horiguchi  */
34761e8f889bSDavid Gibson static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
34773999f52eSAneesh Kumar K.V 		       unsigned long address, pte_t *ptep,
3478cb900f41SKirill A. Shutemov 		       struct page *pagecache_page, spinlock_t *ptl)
34791e8f889bSDavid Gibson {
34803999f52eSAneesh Kumar K.V 	pte_t pte;
3481a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
34821e8f889bSDavid Gibson 	struct page *old_page, *new_page;
3483ad4404a2SDavidlohr Bueso 	int ret = 0, outside_reserve = 0;
34842ec74c3eSSagi Grimberg 	unsigned long mmun_start;	/* For mmu_notifiers */
34852ec74c3eSSagi Grimberg 	unsigned long mmun_end;		/* For mmu_notifiers */
34861e8f889bSDavid Gibson 
34873999f52eSAneesh Kumar K.V 	pte = huge_ptep_get(ptep);
34881e8f889bSDavid Gibson 	old_page = pte_page(pte);
34891e8f889bSDavid Gibson 
349004f2cbe3SMel Gorman retry_avoidcopy:
34911e8f889bSDavid Gibson 	/* If no-one else is actually using this page, avoid the copy
34921e8f889bSDavid Gibson 	 * and just make the page writable */
349337a2140dSJoonsoo Kim 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
34945a49973dSHugh Dickins 		page_move_anon_rmap(old_page, vma);
34951e8f889bSDavid Gibson 		set_huge_ptep_writable(vma, address, ptep);
349683c54070SNick Piggin 		return 0;
34971e8f889bSDavid Gibson 	}
34981e8f889bSDavid Gibson 
349904f2cbe3SMel Gorman 	/*
350004f2cbe3SMel Gorman 	 * If the process that created a MAP_PRIVATE mapping is about to
350104f2cbe3SMel Gorman 	 * perform a COW due to a shared page count, attempt to satisfy
350204f2cbe3SMel Gorman 	 * the allocation without using the existing reserves. The pagecache
350304f2cbe3SMel Gorman 	 * page is used to determine if the reserve at this address was
350404f2cbe3SMel Gorman 	 * consumed or not. If reserves were used, a partial faulted mapping
350504f2cbe3SMel Gorman 	 * at the time of fork() could consume its reserves on COW instead
350604f2cbe3SMel Gorman 	 * of the full address range.
350704f2cbe3SMel Gorman 	 */
35085944d011SJoonsoo Kim 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
350904f2cbe3SMel Gorman 			old_page != pagecache_page)
351004f2cbe3SMel Gorman 		outside_reserve = 1;
351104f2cbe3SMel Gorman 
351209cbfeafSKirill A. Shutemov 	get_page(old_page);
3513b76c8cfbSLarry Woodman 
3514ad4404a2SDavidlohr Bueso 	/*
3515ad4404a2SDavidlohr Bueso 	 * Drop page table lock as buddy allocator may be called. It will
3516ad4404a2SDavidlohr Bueso 	 * be acquired again before returning to the caller, as expected.
3517ad4404a2SDavidlohr Bueso 	 */
3518cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
351904f2cbe3SMel Gorman 	new_page = alloc_huge_page(vma, address, outside_reserve);
35201e8f889bSDavid Gibson 
35212fc39cecSAdam Litke 	if (IS_ERR(new_page)) {
352204f2cbe3SMel Gorman 		/*
352304f2cbe3SMel Gorman 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
352404f2cbe3SMel Gorman 		 * it is due to references held by a child and an insufficient
352504f2cbe3SMel Gorman 		 * huge page pool. To guarantee the original mappers
352604f2cbe3SMel Gorman 		 * reliability, unmap the page from child processes. The child
352704f2cbe3SMel Gorman 		 * may get SIGKILLed if it later faults.
352804f2cbe3SMel Gorman 		 */
352904f2cbe3SMel Gorman 		if (outside_reserve) {
353009cbfeafSKirill A. Shutemov 			put_page(old_page);
353104f2cbe3SMel Gorman 			BUG_ON(huge_pte_none(pte));
35322f4612afSDavidlohr Bueso 			unmap_ref_private(mm, vma, old_page, address);
353304f2cbe3SMel Gorman 			BUG_ON(huge_pte_none(pte));
3534cb900f41SKirill A. Shutemov 			spin_lock(ptl);
3535a734bcc8SHillf Danton 			ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3536a9af0c5dSNaoya Horiguchi 			if (likely(ptep &&
3537a9af0c5dSNaoya Horiguchi 				   pte_same(huge_ptep_get(ptep), pte)))
353804f2cbe3SMel Gorman 				goto retry_avoidcopy;
3539a734bcc8SHillf Danton 			/*
3540cb900f41SKirill A. Shutemov 			 * race occurs while re-acquiring page table
3541cb900f41SKirill A. Shutemov 			 * lock, and our job is done.
3542a734bcc8SHillf Danton 			 */
3543a734bcc8SHillf Danton 			return 0;
354404f2cbe3SMel Gorman 		}
354504f2cbe3SMel Gorman 
3546ad4404a2SDavidlohr Bueso 		ret = (PTR_ERR(new_page) == -ENOMEM) ?
3547ad4404a2SDavidlohr Bueso 			VM_FAULT_OOM : VM_FAULT_SIGBUS;
3548ad4404a2SDavidlohr Bueso 		goto out_release_old;
35491e8f889bSDavid Gibson 	}
35501e8f889bSDavid Gibson 
35510fe6e20bSNaoya Horiguchi 	/*
35520fe6e20bSNaoya Horiguchi 	 * When the original hugepage is shared one, it does not have
35530fe6e20bSNaoya Horiguchi 	 * anon_vma prepared.
35540fe6e20bSNaoya Horiguchi 	 */
355544e2aa93SDean Nelson 	if (unlikely(anon_vma_prepare(vma))) {
3556ad4404a2SDavidlohr Bueso 		ret = VM_FAULT_OOM;
3557ad4404a2SDavidlohr Bueso 		goto out_release_all;
355844e2aa93SDean Nelson 	}
35590fe6e20bSNaoya Horiguchi 
356047ad8475SAndrea Arcangeli 	copy_user_huge_page(new_page, old_page, address, vma,
356147ad8475SAndrea Arcangeli 			    pages_per_huge_page(h));
35620ed361deSNick Piggin 	__SetPageUptodate(new_page);
3563bcc54222SNaoya Horiguchi 	set_page_huge_active(new_page);
35641e8f889bSDavid Gibson 
35652ec74c3eSSagi Grimberg 	mmun_start = address & huge_page_mask(h);
35662ec74c3eSSagi Grimberg 	mmun_end = mmun_start + huge_page_size(h);
35672ec74c3eSSagi Grimberg 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3568ad4404a2SDavidlohr Bueso 
3569b76c8cfbSLarry Woodman 	/*
3570cb900f41SKirill A. Shutemov 	 * Retake the page table lock to check for racing updates
3571b76c8cfbSLarry Woodman 	 * before the page tables are altered
3572b76c8cfbSLarry Woodman 	 */
3573cb900f41SKirill A. Shutemov 	spin_lock(ptl);
3574a5516438SAndi Kleen 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3575a9af0c5dSNaoya Horiguchi 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
357607443a85SJoonsoo Kim 		ClearPagePrivate(new_page);
357707443a85SJoonsoo Kim 
35781e8f889bSDavid Gibson 		/* Break COW */
35798fe627ecSGerald Schaefer 		huge_ptep_clear_flush(vma, address, ptep);
358034ee645eSJoerg Roedel 		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
35811e8f889bSDavid Gibson 		set_huge_pte_at(mm, address, ptep,
35821e8f889bSDavid Gibson 				make_huge_pte(vma, new_page, 1));
3583d281ee61SKirill A. Shutemov 		page_remove_rmap(old_page, true);
3584cd67f0d2SNaoya Horiguchi 		hugepage_add_new_anon_rmap(new_page, vma, address);
35851e8f889bSDavid Gibson 		/* Make the old page be freed below */
35861e8f889bSDavid Gibson 		new_page = old_page;
35871e8f889bSDavid Gibson 	}
3588cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
35892ec74c3eSSagi Grimberg 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3590ad4404a2SDavidlohr Bueso out_release_all:
359196b96a96SMike Kravetz 	restore_reserve_on_error(h, vma, address, new_page);
359209cbfeafSKirill A. Shutemov 	put_page(new_page);
3593ad4404a2SDavidlohr Bueso out_release_old:
359409cbfeafSKirill A. Shutemov 	put_page(old_page);
35958312034fSJoonsoo Kim 
3596ad4404a2SDavidlohr Bueso 	spin_lock(ptl); /* Caller expects lock to be held */
3597ad4404a2SDavidlohr Bueso 	return ret;
35981e8f889bSDavid Gibson }
35991e8f889bSDavid Gibson 
360004f2cbe3SMel Gorman /* Return the pagecache page at a given address within a VMA */
3601a5516438SAndi Kleen static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3602a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long address)
360304f2cbe3SMel Gorman {
360404f2cbe3SMel Gorman 	struct address_space *mapping;
3605e7c4b0bfSAndy Whitcroft 	pgoff_t idx;
360604f2cbe3SMel Gorman 
360704f2cbe3SMel Gorman 	mapping = vma->vm_file->f_mapping;
3608a5516438SAndi Kleen 	idx = vma_hugecache_offset(h, vma, address);
360904f2cbe3SMel Gorman 
361004f2cbe3SMel Gorman 	return find_lock_page(mapping, idx);
361104f2cbe3SMel Gorman }
361204f2cbe3SMel Gorman 
36133ae77f43SHugh Dickins /*
36143ae77f43SHugh Dickins  * Return whether there is a pagecache page to back given address within VMA.
36153ae77f43SHugh Dickins  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
36163ae77f43SHugh Dickins  */
36173ae77f43SHugh Dickins static bool hugetlbfs_pagecache_present(struct hstate *h,
36182a15efc9SHugh Dickins 			struct vm_area_struct *vma, unsigned long address)
36192a15efc9SHugh Dickins {
36202a15efc9SHugh Dickins 	struct address_space *mapping;
36212a15efc9SHugh Dickins 	pgoff_t idx;
36222a15efc9SHugh Dickins 	struct page *page;
36232a15efc9SHugh Dickins 
36242a15efc9SHugh Dickins 	mapping = vma->vm_file->f_mapping;
36252a15efc9SHugh Dickins 	idx = vma_hugecache_offset(h, vma, address);
36262a15efc9SHugh Dickins 
36272a15efc9SHugh Dickins 	page = find_get_page(mapping, idx);
36282a15efc9SHugh Dickins 	if (page)
36292a15efc9SHugh Dickins 		put_page(page);
36302a15efc9SHugh Dickins 	return page != NULL;
36312a15efc9SHugh Dickins }
36322a15efc9SHugh Dickins 
3633ab76ad54SMike Kravetz int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3634ab76ad54SMike Kravetz 			   pgoff_t idx)
3635ab76ad54SMike Kravetz {
3636ab76ad54SMike Kravetz 	struct inode *inode = mapping->host;
3637ab76ad54SMike Kravetz 	struct hstate *h = hstate_inode(inode);
3638ab76ad54SMike Kravetz 	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3639ab76ad54SMike Kravetz 
3640ab76ad54SMike Kravetz 	if (err)
3641ab76ad54SMike Kravetz 		return err;
3642ab76ad54SMike Kravetz 	ClearPagePrivate(page);
3643ab76ad54SMike Kravetz 
3644ab76ad54SMike Kravetz 	spin_lock(&inode->i_lock);
3645ab76ad54SMike Kravetz 	inode->i_blocks += blocks_per_huge_page(h);
3646ab76ad54SMike Kravetz 	spin_unlock(&inode->i_lock);
3647ab76ad54SMike Kravetz 	return 0;
3648ab76ad54SMike Kravetz }
3649ab76ad54SMike Kravetz 
3650a1ed3ddaSRobert P. J. Day static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
36518382d914SDavidlohr Bueso 			   struct address_space *mapping, pgoff_t idx,
3652788c7df4SHugh Dickins 			   unsigned long address, pte_t *ptep, unsigned int flags)
3653ac9b9c66SHugh Dickins {
3654a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
3655ac9b9c66SHugh Dickins 	int ret = VM_FAULT_SIGBUS;
3656409eb8c2SHillf Danton 	int anon_rmap = 0;
36574c887265SAdam Litke 	unsigned long size;
36584c887265SAdam Litke 	struct page *page;
36591e8f889bSDavid Gibson 	pte_t new_pte;
3660cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
36614c887265SAdam Litke 
366204f2cbe3SMel Gorman 	/*
366304f2cbe3SMel Gorman 	 * Currently, we are forced to kill the process in the event the
366404f2cbe3SMel Gorman 	 * original mapper has unmapped pages from the child due to a failed
366525985edcSLucas De Marchi 	 * COW. Warn that such a situation has occurred as it may not be obvious
366604f2cbe3SMel Gorman 	 */
366704f2cbe3SMel Gorman 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3668910154d5SGeoffrey Thomas 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
366904f2cbe3SMel Gorman 			   current->pid);
367004f2cbe3SMel Gorman 		return ret;
367104f2cbe3SMel Gorman 	}
367204f2cbe3SMel Gorman 
36734c887265SAdam Litke 	/*
36744c887265SAdam Litke 	 * Use page lock to guard against racing truncation
36754c887265SAdam Litke 	 * before we get page_table_lock.
36764c887265SAdam Litke 	 */
36776bda666aSChristoph Lameter retry:
36786bda666aSChristoph Lameter 	page = find_lock_page(mapping, idx);
36796bda666aSChristoph Lameter 	if (!page) {
3680a5516438SAndi Kleen 		size = i_size_read(mapping->host) >> huge_page_shift(h);
3681ebed4bfcSHugh Dickins 		if (idx >= size)
3682ebed4bfcSHugh Dickins 			goto out;
368304f2cbe3SMel Gorman 		page = alloc_huge_page(vma, address, 0);
36842fc39cecSAdam Litke 		if (IS_ERR(page)) {
368576dcee75SAneesh Kumar K.V 			ret = PTR_ERR(page);
368676dcee75SAneesh Kumar K.V 			if (ret == -ENOMEM)
368776dcee75SAneesh Kumar K.V 				ret = VM_FAULT_OOM;
368876dcee75SAneesh Kumar K.V 			else
368976dcee75SAneesh Kumar K.V 				ret = VM_FAULT_SIGBUS;
36906bda666aSChristoph Lameter 			goto out;
36916bda666aSChristoph Lameter 		}
369247ad8475SAndrea Arcangeli 		clear_huge_page(page, address, pages_per_huge_page(h));
36930ed361deSNick Piggin 		__SetPageUptodate(page);
3694bcc54222SNaoya Horiguchi 		set_page_huge_active(page);
3695ac9b9c66SHugh Dickins 
3696f83a275dSMel Gorman 		if (vma->vm_flags & VM_MAYSHARE) {
3697ab76ad54SMike Kravetz 			int err = huge_add_to_page_cache(page, mapping, idx);
36986bda666aSChristoph Lameter 			if (err) {
36996bda666aSChristoph Lameter 				put_page(page);
37006bda666aSChristoph Lameter 				if (err == -EEXIST)
37016bda666aSChristoph Lameter 					goto retry;
37026bda666aSChristoph Lameter 				goto out;
37036bda666aSChristoph Lameter 			}
370423be7468SMel Gorman 		} else {
37056bda666aSChristoph Lameter 			lock_page(page);
37060fe6e20bSNaoya Horiguchi 			if (unlikely(anon_vma_prepare(vma))) {
37070fe6e20bSNaoya Horiguchi 				ret = VM_FAULT_OOM;
37080fe6e20bSNaoya Horiguchi 				goto backout_unlocked;
370923be7468SMel Gorman 			}
3710409eb8c2SHillf Danton 			anon_rmap = 1;
37110fe6e20bSNaoya Horiguchi 		}
37120fe6e20bSNaoya Horiguchi 	} else {
371357303d80SAndy Whitcroft 		/*
3714998b4382SNaoya Horiguchi 		 * If memory error occurs between mmap() and fault, some process
3715998b4382SNaoya Horiguchi 		 * don't have hwpoisoned swap entry for errored virtual address.
3716998b4382SNaoya Horiguchi 		 * So we need to block hugepage fault by PG_hwpoison bit check.
3717fd6a03edSNaoya Horiguchi 		 */
3718fd6a03edSNaoya Horiguchi 		if (unlikely(PageHWPoison(page))) {
3719aa50d3a7SAndi Kleen 			ret = VM_FAULT_HWPOISON |
3720972dc4deSAneesh Kumar K.V 				VM_FAULT_SET_HINDEX(hstate_index(h));
3721fd6a03edSNaoya Horiguchi 			goto backout_unlocked;
37226bda666aSChristoph Lameter 		}
3723998b4382SNaoya Horiguchi 	}
37241e8f889bSDavid Gibson 
372557303d80SAndy Whitcroft 	/*
372657303d80SAndy Whitcroft 	 * If we are going to COW a private mapping later, we examine the
372757303d80SAndy Whitcroft 	 * pending reservations for this page now. This will ensure that
372857303d80SAndy Whitcroft 	 * any allocations necessary to record that reservation occur outside
372957303d80SAndy Whitcroft 	 * the spinlock.
373057303d80SAndy Whitcroft 	 */
37315e911373SMike Kravetz 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
37322b26736cSAndy Whitcroft 		if (vma_needs_reservation(h, vma, address) < 0) {
37332b26736cSAndy Whitcroft 			ret = VM_FAULT_OOM;
37342b26736cSAndy Whitcroft 			goto backout_unlocked;
37352b26736cSAndy Whitcroft 		}
37365e911373SMike Kravetz 		/* Just decrements count, does not deallocate */
3737feba16e2SMike Kravetz 		vma_end_reservation(h, vma, address);
37385e911373SMike Kravetz 	}
373957303d80SAndy Whitcroft 
37408bea8052SAneesh Kumar K.V 	ptl = huge_pte_lock(h, mm, ptep);
3741a5516438SAndi Kleen 	size = i_size_read(mapping->host) >> huge_page_shift(h);
37424c887265SAdam Litke 	if (idx >= size)
37434c887265SAdam Litke 		goto backout;
37444c887265SAdam Litke 
374583c54070SNick Piggin 	ret = 0;
37467f2e9525SGerald Schaefer 	if (!huge_pte_none(huge_ptep_get(ptep)))
37474c887265SAdam Litke 		goto backout;
37484c887265SAdam Litke 
374907443a85SJoonsoo Kim 	if (anon_rmap) {
375007443a85SJoonsoo Kim 		ClearPagePrivate(page);
3751409eb8c2SHillf Danton 		hugepage_add_new_anon_rmap(page, vma, address);
3752ac714904SChoi Gi-yong 	} else
375353f9263bSKirill A. Shutemov 		page_dup_rmap(page, true);
37541e8f889bSDavid Gibson 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
37551e8f889bSDavid Gibson 				&& (vma->vm_flags & VM_SHARED)));
37561e8f889bSDavid Gibson 	set_huge_pte_at(mm, address, ptep, new_pte);
37571e8f889bSDavid Gibson 
37585d317b2bSNaoya Horiguchi 	hugetlb_count_add(pages_per_huge_page(h), mm);
3759788c7df4SHugh Dickins 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
37601e8f889bSDavid Gibson 		/* Optimization, do the COW without a second fault */
37613999f52eSAneesh Kumar K.V 		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
37621e8f889bSDavid Gibson 	}
37631e8f889bSDavid Gibson 
3764cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
37654c887265SAdam Litke 	unlock_page(page);
37664c887265SAdam Litke out:
3767ac9b9c66SHugh Dickins 	return ret;
37684c887265SAdam Litke 
37694c887265SAdam Litke backout:
3770cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
37712b26736cSAndy Whitcroft backout_unlocked:
37724c887265SAdam Litke 	unlock_page(page);
377396b96a96SMike Kravetz 	restore_reserve_on_error(h, vma, address, page);
37744c887265SAdam Litke 	put_page(page);
37754c887265SAdam Litke 	goto out;
3776ac9b9c66SHugh Dickins }
3777ac9b9c66SHugh Dickins 
37788382d914SDavidlohr Bueso #ifdef CONFIG_SMP
3779c672c7f2SMike Kravetz u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
37808382d914SDavidlohr Bueso 			    struct vm_area_struct *vma,
37818382d914SDavidlohr Bueso 			    struct address_space *mapping,
37828382d914SDavidlohr Bueso 			    pgoff_t idx, unsigned long address)
37838382d914SDavidlohr Bueso {
37848382d914SDavidlohr Bueso 	unsigned long key[2];
37858382d914SDavidlohr Bueso 	u32 hash;
37868382d914SDavidlohr Bueso 
37878382d914SDavidlohr Bueso 	if (vma->vm_flags & VM_SHARED) {
37888382d914SDavidlohr Bueso 		key[0] = (unsigned long) mapping;
37898382d914SDavidlohr Bueso 		key[1] = idx;
37908382d914SDavidlohr Bueso 	} else {
37918382d914SDavidlohr Bueso 		key[0] = (unsigned long) mm;
37928382d914SDavidlohr Bueso 		key[1] = address >> huge_page_shift(h);
37938382d914SDavidlohr Bueso 	}
37948382d914SDavidlohr Bueso 
37958382d914SDavidlohr Bueso 	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
37968382d914SDavidlohr Bueso 
37978382d914SDavidlohr Bueso 	return hash & (num_fault_mutexes - 1);
37988382d914SDavidlohr Bueso }
37998382d914SDavidlohr Bueso #else
38008382d914SDavidlohr Bueso /*
38018382d914SDavidlohr Bueso  * For uniprocesor systems we always use a single mutex, so just
38028382d914SDavidlohr Bueso  * return 0 and avoid the hashing overhead.
38038382d914SDavidlohr Bueso  */
3804c672c7f2SMike Kravetz u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
38058382d914SDavidlohr Bueso 			    struct vm_area_struct *vma,
38068382d914SDavidlohr Bueso 			    struct address_space *mapping,
38078382d914SDavidlohr Bueso 			    pgoff_t idx, unsigned long address)
38088382d914SDavidlohr Bueso {
38098382d914SDavidlohr Bueso 	return 0;
38108382d914SDavidlohr Bueso }
38118382d914SDavidlohr Bueso #endif
38128382d914SDavidlohr Bueso 
381386e5216fSAdam Litke int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3814788c7df4SHugh Dickins 			unsigned long address, unsigned int flags)
381586e5216fSAdam Litke {
38168382d914SDavidlohr Bueso 	pte_t *ptep, entry;
3817cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
38181e8f889bSDavid Gibson 	int ret;
38198382d914SDavidlohr Bueso 	u32 hash;
38208382d914SDavidlohr Bueso 	pgoff_t idx;
38210fe6e20bSNaoya Horiguchi 	struct page *page = NULL;
382257303d80SAndy Whitcroft 	struct page *pagecache_page = NULL;
3823a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
38248382d914SDavidlohr Bueso 	struct address_space *mapping;
38250f792cf9SNaoya Horiguchi 	int need_wait_lock = 0;
382686e5216fSAdam Litke 
38271e16a539SKAMEZAWA Hiroyuki 	address &= huge_page_mask(h);
38281e16a539SKAMEZAWA Hiroyuki 
3829fd6a03edSNaoya Horiguchi 	ptep = huge_pte_offset(mm, address);
3830fd6a03edSNaoya Horiguchi 	if (ptep) {
3831fd6a03edSNaoya Horiguchi 		entry = huge_ptep_get(ptep);
3832290408d4SNaoya Horiguchi 		if (unlikely(is_hugetlb_entry_migration(entry))) {
3833cb900f41SKirill A. Shutemov 			migration_entry_wait_huge(vma, mm, ptep);
3834290408d4SNaoya Horiguchi 			return 0;
3835290408d4SNaoya Horiguchi 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3836aa50d3a7SAndi Kleen 			return VM_FAULT_HWPOISON_LARGE |
3837972dc4deSAneesh Kumar K.V 				VM_FAULT_SET_HINDEX(hstate_index(h));
38380d777df5SNaoya Horiguchi 	} else {
3839a5516438SAndi Kleen 		ptep = huge_pte_alloc(mm, address, huge_page_size(h));
384086e5216fSAdam Litke 		if (!ptep)
384186e5216fSAdam Litke 			return VM_FAULT_OOM;
38420d777df5SNaoya Horiguchi 	}
384386e5216fSAdam Litke 
38448382d914SDavidlohr Bueso 	mapping = vma->vm_file->f_mapping;
38458382d914SDavidlohr Bueso 	idx = vma_hugecache_offset(h, vma, address);
38468382d914SDavidlohr Bueso 
38473935baa9SDavid Gibson 	/*
38483935baa9SDavid Gibson 	 * Serialize hugepage allocation and instantiation, so that we don't
38493935baa9SDavid Gibson 	 * get spurious allocation failures if two CPUs race to instantiate
38503935baa9SDavid Gibson 	 * the same page in the page cache.
38513935baa9SDavid Gibson 	 */
3852c672c7f2SMike Kravetz 	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3853c672c7f2SMike Kravetz 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
38548382d914SDavidlohr Bueso 
38557f2e9525SGerald Schaefer 	entry = huge_ptep_get(ptep);
38567f2e9525SGerald Schaefer 	if (huge_pte_none(entry)) {
38578382d914SDavidlohr Bueso 		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3858b4d1d99fSDavid Gibson 		goto out_mutex;
38593935baa9SDavid Gibson 	}
386086e5216fSAdam Litke 
386183c54070SNick Piggin 	ret = 0;
38621e8f889bSDavid Gibson 
386357303d80SAndy Whitcroft 	/*
38640f792cf9SNaoya Horiguchi 	 * entry could be a migration/hwpoison entry at this point, so this
38650f792cf9SNaoya Horiguchi 	 * check prevents the kernel from going below assuming that we have
38660f792cf9SNaoya Horiguchi 	 * a active hugepage in pagecache. This goto expects the 2nd page fault,
38670f792cf9SNaoya Horiguchi 	 * and is_hugetlb_entry_(migration|hwpoisoned) check will properly
38680f792cf9SNaoya Horiguchi 	 * handle it.
38690f792cf9SNaoya Horiguchi 	 */
38700f792cf9SNaoya Horiguchi 	if (!pte_present(entry))
38710f792cf9SNaoya Horiguchi 		goto out_mutex;
38720f792cf9SNaoya Horiguchi 
38730f792cf9SNaoya Horiguchi 	/*
387457303d80SAndy Whitcroft 	 * If we are going to COW the mapping later, we examine the pending
387557303d80SAndy Whitcroft 	 * reservations for this page now. This will ensure that any
387657303d80SAndy Whitcroft 	 * allocations necessary to record that reservation occur outside the
387757303d80SAndy Whitcroft 	 * spinlock. For private mappings, we also lookup the pagecache
387857303d80SAndy Whitcroft 	 * page now as it is used to determine if a reservation has been
387957303d80SAndy Whitcroft 	 * consumed.
388057303d80SAndy Whitcroft 	 */
3881106c992aSGerald Schaefer 	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
38822b26736cSAndy Whitcroft 		if (vma_needs_reservation(h, vma, address) < 0) {
38832b26736cSAndy Whitcroft 			ret = VM_FAULT_OOM;
3884b4d1d99fSDavid Gibson 			goto out_mutex;
38852b26736cSAndy Whitcroft 		}
38865e911373SMike Kravetz 		/* Just decrements count, does not deallocate */
3887feba16e2SMike Kravetz 		vma_end_reservation(h, vma, address);
388857303d80SAndy Whitcroft 
3889f83a275dSMel Gorman 		if (!(vma->vm_flags & VM_MAYSHARE))
389057303d80SAndy Whitcroft 			pagecache_page = hugetlbfs_pagecache_page(h,
389157303d80SAndy Whitcroft 								vma, address);
389257303d80SAndy Whitcroft 	}
389357303d80SAndy Whitcroft 
38940f792cf9SNaoya Horiguchi 	ptl = huge_pte_lock(h, mm, ptep);
38950fe6e20bSNaoya Horiguchi 
38961e8f889bSDavid Gibson 	/* Check for a racing update before calling hugetlb_cow */
3897b4d1d99fSDavid Gibson 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3898cb900f41SKirill A. Shutemov 		goto out_ptl;
3899b4d1d99fSDavid Gibson 
39000f792cf9SNaoya Horiguchi 	/*
39010f792cf9SNaoya Horiguchi 	 * hugetlb_cow() requires page locks of pte_page(entry) and
39020f792cf9SNaoya Horiguchi 	 * pagecache_page, so here we need take the former one
39030f792cf9SNaoya Horiguchi 	 * when page != pagecache_page or !pagecache_page.
39040f792cf9SNaoya Horiguchi 	 */
39050f792cf9SNaoya Horiguchi 	page = pte_page(entry);
39060f792cf9SNaoya Horiguchi 	if (page != pagecache_page)
39070f792cf9SNaoya Horiguchi 		if (!trylock_page(page)) {
39080f792cf9SNaoya Horiguchi 			need_wait_lock = 1;
39090f792cf9SNaoya Horiguchi 			goto out_ptl;
39100f792cf9SNaoya Horiguchi 		}
39110f792cf9SNaoya Horiguchi 
39120f792cf9SNaoya Horiguchi 	get_page(page);
3913b4d1d99fSDavid Gibson 
3914788c7df4SHugh Dickins 	if (flags & FAULT_FLAG_WRITE) {
3915106c992aSGerald Schaefer 		if (!huge_pte_write(entry)) {
39163999f52eSAneesh Kumar K.V 			ret = hugetlb_cow(mm, vma, address, ptep,
3917cb900f41SKirill A. Shutemov 					  pagecache_page, ptl);
39180f792cf9SNaoya Horiguchi 			goto out_put_page;
3919b4d1d99fSDavid Gibson 		}
3920106c992aSGerald Schaefer 		entry = huge_pte_mkdirty(entry);
3921b4d1d99fSDavid Gibson 	}
3922b4d1d99fSDavid Gibson 	entry = pte_mkyoung(entry);
3923788c7df4SHugh Dickins 	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3924788c7df4SHugh Dickins 						flags & FAULT_FLAG_WRITE))
39254b3073e1SRussell King 		update_mmu_cache(vma, address, ptep);
39260f792cf9SNaoya Horiguchi out_put_page:
39270f792cf9SNaoya Horiguchi 	if (page != pagecache_page)
39280f792cf9SNaoya Horiguchi 		unlock_page(page);
39290f792cf9SNaoya Horiguchi 	put_page(page);
3930cb900f41SKirill A. Shutemov out_ptl:
3931cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
393257303d80SAndy Whitcroft 
393357303d80SAndy Whitcroft 	if (pagecache_page) {
393457303d80SAndy Whitcroft 		unlock_page(pagecache_page);
393557303d80SAndy Whitcroft 		put_page(pagecache_page);
393657303d80SAndy Whitcroft 	}
3937b4d1d99fSDavid Gibson out_mutex:
3938c672c7f2SMike Kravetz 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
39390f792cf9SNaoya Horiguchi 	/*
39400f792cf9SNaoya Horiguchi 	 * Generally it's safe to hold refcount during waiting page lock. But
39410f792cf9SNaoya Horiguchi 	 * here we just wait to defer the next page fault to avoid busy loop and
39420f792cf9SNaoya Horiguchi 	 * the page is not used after unlocked before returning from the current
39430f792cf9SNaoya Horiguchi 	 * page fault. So we are safe from accessing freed page, even if we wait
39440f792cf9SNaoya Horiguchi 	 * here without taking refcount.
39450f792cf9SNaoya Horiguchi 	 */
39460f792cf9SNaoya Horiguchi 	if (need_wait_lock)
39470f792cf9SNaoya Horiguchi 		wait_on_page_locked(page);
39481e8f889bSDavid Gibson 	return ret;
394986e5216fSAdam Litke }
395086e5216fSAdam Litke 
395128a35716SMichel Lespinasse long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
395263551ae0SDavid Gibson 			 struct page **pages, struct vm_area_struct **vmas,
395328a35716SMichel Lespinasse 			 unsigned long *position, unsigned long *nr_pages,
395428a35716SMichel Lespinasse 			 long i, unsigned int flags)
395563551ae0SDavid Gibson {
3956d5d4b0aaSChen, Kenneth W 	unsigned long pfn_offset;
3957d5d4b0aaSChen, Kenneth W 	unsigned long vaddr = *position;
395828a35716SMichel Lespinasse 	unsigned long remainder = *nr_pages;
3959a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
396063551ae0SDavid Gibson 
396163551ae0SDavid Gibson 	while (vaddr < vma->vm_end && remainder) {
396263551ae0SDavid Gibson 		pte_t *pte;
3963cb900f41SKirill A. Shutemov 		spinlock_t *ptl = NULL;
39642a15efc9SHugh Dickins 		int absent;
396563551ae0SDavid Gibson 		struct page *page;
396663551ae0SDavid Gibson 
39674c887265SAdam Litke 		/*
396802057967SDavid Rientjes 		 * If we have a pending SIGKILL, don't keep faulting pages and
396902057967SDavid Rientjes 		 * potentially allocating memory.
397002057967SDavid Rientjes 		 */
397102057967SDavid Rientjes 		if (unlikely(fatal_signal_pending(current))) {
397202057967SDavid Rientjes 			remainder = 0;
397302057967SDavid Rientjes 			break;
397402057967SDavid Rientjes 		}
397502057967SDavid Rientjes 
397602057967SDavid Rientjes 		/*
39774c887265SAdam Litke 		 * Some archs (sparc64, sh*) have multiple pte_ts to
39782a15efc9SHugh Dickins 		 * each hugepage.  We have to make sure we get the
39794c887265SAdam Litke 		 * first, for the page indexing below to work.
3980cb900f41SKirill A. Shutemov 		 *
3981cb900f41SKirill A. Shutemov 		 * Note that page table lock is not held when pte is null.
39824c887265SAdam Litke 		 */
3983a5516438SAndi Kleen 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3984cb900f41SKirill A. Shutemov 		if (pte)
3985cb900f41SKirill A. Shutemov 			ptl = huge_pte_lock(h, mm, pte);
39862a15efc9SHugh Dickins 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
398763551ae0SDavid Gibson 
39882a15efc9SHugh Dickins 		/*
39892a15efc9SHugh Dickins 		 * When coredumping, it suits get_dump_page if we just return
39903ae77f43SHugh Dickins 		 * an error where there's an empty slot with no huge pagecache
39913ae77f43SHugh Dickins 		 * to back it.  This way, we avoid allocating a hugepage, and
39923ae77f43SHugh Dickins 		 * the sparse dumpfile avoids allocating disk blocks, but its
39933ae77f43SHugh Dickins 		 * huge holes still show up with zeroes where they need to be.
39942a15efc9SHugh Dickins 		 */
39953ae77f43SHugh Dickins 		if (absent && (flags & FOLL_DUMP) &&
39963ae77f43SHugh Dickins 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3997cb900f41SKirill A. Shutemov 			if (pte)
3998cb900f41SKirill A. Shutemov 				spin_unlock(ptl);
39992a15efc9SHugh Dickins 			remainder = 0;
40002a15efc9SHugh Dickins 			break;
40012a15efc9SHugh Dickins 		}
40022a15efc9SHugh Dickins 
40039cc3a5bdSNaoya Horiguchi 		/*
40049cc3a5bdSNaoya Horiguchi 		 * We need call hugetlb_fault for both hugepages under migration
40059cc3a5bdSNaoya Horiguchi 		 * (in which case hugetlb_fault waits for the migration,) and
40069cc3a5bdSNaoya Horiguchi 		 * hwpoisoned hugepages (in which case we need to prevent the
40079cc3a5bdSNaoya Horiguchi 		 * caller from accessing to them.) In order to do this, we use
40089cc3a5bdSNaoya Horiguchi 		 * here is_swap_pte instead of is_hugetlb_entry_migration and
40099cc3a5bdSNaoya Horiguchi 		 * is_hugetlb_entry_hwpoisoned. This is because it simply covers
40109cc3a5bdSNaoya Horiguchi 		 * both cases, and because we can't follow correct pages
40119cc3a5bdSNaoya Horiguchi 		 * directly from any kind of swap entries.
40129cc3a5bdSNaoya Horiguchi 		 */
40139cc3a5bdSNaoya Horiguchi 		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4014106c992aSGerald Schaefer 		    ((flags & FOLL_WRITE) &&
4015106c992aSGerald Schaefer 		      !huge_pte_write(huge_ptep_get(pte)))) {
40164c887265SAdam Litke 			int ret;
40174c887265SAdam Litke 
4018cb900f41SKirill A. Shutemov 			if (pte)
4019cb900f41SKirill A. Shutemov 				spin_unlock(ptl);
40202a15efc9SHugh Dickins 			ret = hugetlb_fault(mm, vma, vaddr,
40212a15efc9SHugh Dickins 				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
4022a89182c7SAdam Litke 			if (!(ret & VM_FAULT_ERROR))
40234c887265SAdam Litke 				continue;
40244c887265SAdam Litke 
40251c59827dSHugh Dickins 			remainder = 0;
40261c59827dSHugh Dickins 			break;
40271c59827dSHugh Dickins 		}
402863551ae0SDavid Gibson 
4029a5516438SAndi Kleen 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
40307f2e9525SGerald Schaefer 		page = pte_page(huge_ptep_get(pte));
4031d5d4b0aaSChen, Kenneth W same_page:
4032d6692183SChen, Kenneth W 		if (pages) {
403369d177c2SAndy Whitcroft 			pages[i] = mem_map_offset(page, pfn_offset);
4034ddc58f27SKirill A. Shutemov 			get_page(pages[i]);
4035d6692183SChen, Kenneth W 		}
403663551ae0SDavid Gibson 
403763551ae0SDavid Gibson 		if (vmas)
403863551ae0SDavid Gibson 			vmas[i] = vma;
403963551ae0SDavid Gibson 
404063551ae0SDavid Gibson 		vaddr += PAGE_SIZE;
4041d5d4b0aaSChen, Kenneth W 		++pfn_offset;
404263551ae0SDavid Gibson 		--remainder;
404363551ae0SDavid Gibson 		++i;
4044d5d4b0aaSChen, Kenneth W 		if (vaddr < vma->vm_end && remainder &&
4045a5516438SAndi Kleen 				pfn_offset < pages_per_huge_page(h)) {
4046d5d4b0aaSChen, Kenneth W 			/*
4047d5d4b0aaSChen, Kenneth W 			 * We use pfn_offset to avoid touching the pageframes
4048d5d4b0aaSChen, Kenneth W 			 * of this compound page.
4049d5d4b0aaSChen, Kenneth W 			 */
4050d5d4b0aaSChen, Kenneth W 			goto same_page;
4051d5d4b0aaSChen, Kenneth W 		}
4052cb900f41SKirill A. Shutemov 		spin_unlock(ptl);
405363551ae0SDavid Gibson 	}
405428a35716SMichel Lespinasse 	*nr_pages = remainder;
405563551ae0SDavid Gibson 	*position = vaddr;
405663551ae0SDavid Gibson 
40572a15efc9SHugh Dickins 	return i ? i : -EFAULT;
405863551ae0SDavid Gibson }
40598f860591SZhang, Yanmin 
40605491ae7bSAneesh Kumar K.V #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
40615491ae7bSAneesh Kumar K.V /*
40625491ae7bSAneesh Kumar K.V  * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
40635491ae7bSAneesh Kumar K.V  * implement this.
40645491ae7bSAneesh Kumar K.V  */
40655491ae7bSAneesh Kumar K.V #define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
40665491ae7bSAneesh Kumar K.V #endif
40675491ae7bSAneesh Kumar K.V 
40687da4d641SPeter Zijlstra unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
40698f860591SZhang, Yanmin 		unsigned long address, unsigned long end, pgprot_t newprot)
40708f860591SZhang, Yanmin {
40718f860591SZhang, Yanmin 	struct mm_struct *mm = vma->vm_mm;
40728f860591SZhang, Yanmin 	unsigned long start = address;
40738f860591SZhang, Yanmin 	pte_t *ptep;
40748f860591SZhang, Yanmin 	pte_t pte;
4075a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
40767da4d641SPeter Zijlstra 	unsigned long pages = 0;
40778f860591SZhang, Yanmin 
40788f860591SZhang, Yanmin 	BUG_ON(address >= end);
40798f860591SZhang, Yanmin 	flush_cache_range(vma, address, end);
40808f860591SZhang, Yanmin 
4081a5338093SRik van Riel 	mmu_notifier_invalidate_range_start(mm, start, end);
408283cde9e8SDavidlohr Bueso 	i_mmap_lock_write(vma->vm_file->f_mapping);
4083a5516438SAndi Kleen 	for (; address < end; address += huge_page_size(h)) {
4084cb900f41SKirill A. Shutemov 		spinlock_t *ptl;
40858f860591SZhang, Yanmin 		ptep = huge_pte_offset(mm, address);
40868f860591SZhang, Yanmin 		if (!ptep)
40878f860591SZhang, Yanmin 			continue;
4088cb900f41SKirill A. Shutemov 		ptl = huge_pte_lock(h, mm, ptep);
40897da4d641SPeter Zijlstra 		if (huge_pmd_unshare(mm, &address, ptep)) {
40907da4d641SPeter Zijlstra 			pages++;
4091cb900f41SKirill A. Shutemov 			spin_unlock(ptl);
409239dde65cSChen, Kenneth W 			continue;
40937da4d641SPeter Zijlstra 		}
4094a8bda28dSNaoya Horiguchi 		pte = huge_ptep_get(ptep);
4095a8bda28dSNaoya Horiguchi 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4096a8bda28dSNaoya Horiguchi 			spin_unlock(ptl);
4097a8bda28dSNaoya Horiguchi 			continue;
4098a8bda28dSNaoya Horiguchi 		}
4099a8bda28dSNaoya Horiguchi 		if (unlikely(is_hugetlb_entry_migration(pte))) {
4100a8bda28dSNaoya Horiguchi 			swp_entry_t entry = pte_to_swp_entry(pte);
4101a8bda28dSNaoya Horiguchi 
4102a8bda28dSNaoya Horiguchi 			if (is_write_migration_entry(entry)) {
4103a8bda28dSNaoya Horiguchi 				pte_t newpte;
4104a8bda28dSNaoya Horiguchi 
4105a8bda28dSNaoya Horiguchi 				make_migration_entry_read(&entry);
4106a8bda28dSNaoya Horiguchi 				newpte = swp_entry_to_pte(entry);
4107a8bda28dSNaoya Horiguchi 				set_huge_pte_at(mm, address, ptep, newpte);
4108a8bda28dSNaoya Horiguchi 				pages++;
4109a8bda28dSNaoya Horiguchi 			}
4110a8bda28dSNaoya Horiguchi 			spin_unlock(ptl);
4111a8bda28dSNaoya Horiguchi 			continue;
4112a8bda28dSNaoya Horiguchi 		}
4113a8bda28dSNaoya Horiguchi 		if (!huge_pte_none(pte)) {
41148f860591SZhang, Yanmin 			pte = huge_ptep_get_and_clear(mm, address, ptep);
4115106c992aSGerald Schaefer 			pte = pte_mkhuge(huge_pte_modify(pte, newprot));
4116be7517d6STony Lu 			pte = arch_make_huge_pte(pte, vma, NULL, 0);
41178f860591SZhang, Yanmin 			set_huge_pte_at(mm, address, ptep, pte);
41187da4d641SPeter Zijlstra 			pages++;
41198f860591SZhang, Yanmin 		}
4120cb900f41SKirill A. Shutemov 		spin_unlock(ptl);
41218f860591SZhang, Yanmin 	}
4122d833352aSMel Gorman 	/*
4123c8c06efaSDavidlohr Bueso 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4124d833352aSMel Gorman 	 * may have cleared our pud entry and done put_page on the page table:
4125c8c06efaSDavidlohr Bueso 	 * once we release i_mmap_rwsem, another task can do the final put_page
4126d833352aSMel Gorman 	 * and that page table be reused and filled with junk.
4127d833352aSMel Gorman 	 */
41285491ae7bSAneesh Kumar K.V 	flush_hugetlb_tlb_range(vma, start, end);
412934ee645eSJoerg Roedel 	mmu_notifier_invalidate_range(mm, start, end);
413083cde9e8SDavidlohr Bueso 	i_mmap_unlock_write(vma->vm_file->f_mapping);
4131a5338093SRik van Riel 	mmu_notifier_invalidate_range_end(mm, start, end);
41327da4d641SPeter Zijlstra 
41337da4d641SPeter Zijlstra 	return pages << h->order;
41348f860591SZhang, Yanmin }
41358f860591SZhang, Yanmin 
4136a1e78772SMel Gorman int hugetlb_reserve_pages(struct inode *inode,
4137a1e78772SMel Gorman 					long from, long to,
41385a6fe125SMel Gorman 					struct vm_area_struct *vma,
4139ca16d140SKOSAKI Motohiro 					vm_flags_t vm_flags)
4140e4e574b7SAdam Litke {
414117c9d12eSMel Gorman 	long ret, chg;
4142a5516438SAndi Kleen 	struct hstate *h = hstate_inode(inode);
414390481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_inode(inode);
41449119a41eSJoonsoo Kim 	struct resv_map *resv_map;
41451c5ecae3SMike Kravetz 	long gbl_reserve;
4146e4e574b7SAdam Litke 
4147a1e78772SMel Gorman 	/*
414817c9d12eSMel Gorman 	 * Only apply hugepage reservation if asked. At fault time, an
414917c9d12eSMel Gorman 	 * attempt will be made for VM_NORESERVE to allocate a page
415090481622SDavid Gibson 	 * without using reserves
415117c9d12eSMel Gorman 	 */
4152ca16d140SKOSAKI Motohiro 	if (vm_flags & VM_NORESERVE)
415317c9d12eSMel Gorman 		return 0;
415417c9d12eSMel Gorman 
415517c9d12eSMel Gorman 	/*
4156a1e78772SMel Gorman 	 * Shared mappings base their reservation on the number of pages that
4157a1e78772SMel Gorman 	 * are already allocated on behalf of the file. Private mappings need
4158a1e78772SMel Gorman 	 * to reserve the full area even if read-only as mprotect() may be
4159a1e78772SMel Gorman 	 * called to make the mapping read-write. Assume !vma is a shm mapping
4160a1e78772SMel Gorman 	 */
41619119a41eSJoonsoo Kim 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
41624e35f483SJoonsoo Kim 		resv_map = inode_resv_map(inode);
41639119a41eSJoonsoo Kim 
41641406ec9bSJoonsoo Kim 		chg = region_chg(resv_map, from, to);
41659119a41eSJoonsoo Kim 
41669119a41eSJoonsoo Kim 	} else {
41679119a41eSJoonsoo Kim 		resv_map = resv_map_alloc();
41685a6fe125SMel Gorman 		if (!resv_map)
41695a6fe125SMel Gorman 			return -ENOMEM;
41705a6fe125SMel Gorman 
417117c9d12eSMel Gorman 		chg = to - from;
417217c9d12eSMel Gorman 
41735a6fe125SMel Gorman 		set_vma_resv_map(vma, resv_map);
41745a6fe125SMel Gorman 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
41755a6fe125SMel Gorman 	}
41765a6fe125SMel Gorman 
4177c50ac050SDave Hansen 	if (chg < 0) {
4178c50ac050SDave Hansen 		ret = chg;
4179c50ac050SDave Hansen 		goto out_err;
4180c50ac050SDave Hansen 	}
418117c9d12eSMel Gorman 
41821c5ecae3SMike Kravetz 	/*
41831c5ecae3SMike Kravetz 	 * There must be enough pages in the subpool for the mapping. If
41841c5ecae3SMike Kravetz 	 * the subpool has a minimum size, there may be some global
41851c5ecae3SMike Kravetz 	 * reservations already in place (gbl_reserve).
41861c5ecae3SMike Kravetz 	 */
41871c5ecae3SMike Kravetz 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
41881c5ecae3SMike Kravetz 	if (gbl_reserve < 0) {
4189c50ac050SDave Hansen 		ret = -ENOSPC;
4190c50ac050SDave Hansen 		goto out_err;
4191c50ac050SDave Hansen 	}
419217c9d12eSMel Gorman 
419317c9d12eSMel Gorman 	/*
419417c9d12eSMel Gorman 	 * Check enough hugepages are available for the reservation.
419590481622SDavid Gibson 	 * Hand the pages back to the subpool if there are not
419617c9d12eSMel Gorman 	 */
41971c5ecae3SMike Kravetz 	ret = hugetlb_acct_memory(h, gbl_reserve);
419817c9d12eSMel Gorman 	if (ret < 0) {
41991c5ecae3SMike Kravetz 		/* put back original number of pages, chg */
42001c5ecae3SMike Kravetz 		(void)hugepage_subpool_put_pages(spool, chg);
4201c50ac050SDave Hansen 		goto out_err;
420217c9d12eSMel Gorman 	}
420317c9d12eSMel Gorman 
420417c9d12eSMel Gorman 	/*
420517c9d12eSMel Gorman 	 * Account for the reservations made. Shared mappings record regions
420617c9d12eSMel Gorman 	 * that have reservations as they are shared by multiple VMAs.
420717c9d12eSMel Gorman 	 * When the last VMA disappears, the region map says how much
420817c9d12eSMel Gorman 	 * the reservation was and the page cache tells how much of
420917c9d12eSMel Gorman 	 * the reservation was consumed. Private mappings are per-VMA and
421017c9d12eSMel Gorman 	 * only the consumed reservations are tracked. When the VMA
421117c9d12eSMel Gorman 	 * disappears, the original reservation is the VMA size and the
421217c9d12eSMel Gorman 	 * consumed reservations are stored in the map. Hence, nothing
421317c9d12eSMel Gorman 	 * else has to be done for private mappings here
421417c9d12eSMel Gorman 	 */
421533039678SMike Kravetz 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
421633039678SMike Kravetz 		long add = region_add(resv_map, from, to);
421733039678SMike Kravetz 
421833039678SMike Kravetz 		if (unlikely(chg > add)) {
421933039678SMike Kravetz 			/*
422033039678SMike Kravetz 			 * pages in this range were added to the reserve
422133039678SMike Kravetz 			 * map between region_chg and region_add.  This
422233039678SMike Kravetz 			 * indicates a race with alloc_huge_page.  Adjust
422333039678SMike Kravetz 			 * the subpool and reserve counts modified above
422433039678SMike Kravetz 			 * based on the difference.
422533039678SMike Kravetz 			 */
422633039678SMike Kravetz 			long rsv_adjust;
422733039678SMike Kravetz 
422833039678SMike Kravetz 			rsv_adjust = hugepage_subpool_put_pages(spool,
422933039678SMike Kravetz 								chg - add);
423033039678SMike Kravetz 			hugetlb_acct_memory(h, -rsv_adjust);
423133039678SMike Kravetz 		}
423233039678SMike Kravetz 	}
4233a43a8c39SChen, Kenneth W 	return 0;
4234c50ac050SDave Hansen out_err:
42355e911373SMike Kravetz 	if (!vma || vma->vm_flags & VM_MAYSHARE)
42365e911373SMike Kravetz 		region_abort(resv_map, from, to);
4237f031dd27SJoonsoo Kim 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4238f031dd27SJoonsoo Kim 		kref_put(&resv_map->refs, resv_map_release);
4239c50ac050SDave Hansen 	return ret;
4240a43a8c39SChen, Kenneth W }
4241a43a8c39SChen, Kenneth W 
4242b5cec28dSMike Kravetz long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4243b5cec28dSMike Kravetz 								long freed)
4244a43a8c39SChen, Kenneth W {
4245a5516438SAndi Kleen 	struct hstate *h = hstate_inode(inode);
42464e35f483SJoonsoo Kim 	struct resv_map *resv_map = inode_resv_map(inode);
42479119a41eSJoonsoo Kim 	long chg = 0;
424890481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_inode(inode);
42491c5ecae3SMike Kravetz 	long gbl_reserve;
425045c682a6SKen Chen 
4251b5cec28dSMike Kravetz 	if (resv_map) {
4252b5cec28dSMike Kravetz 		chg = region_del(resv_map, start, end);
4253b5cec28dSMike Kravetz 		/*
4254b5cec28dSMike Kravetz 		 * region_del() can fail in the rare case where a region
4255b5cec28dSMike Kravetz 		 * must be split and another region descriptor can not be
4256b5cec28dSMike Kravetz 		 * allocated.  If end == LONG_MAX, it will not fail.
4257b5cec28dSMike Kravetz 		 */
4258b5cec28dSMike Kravetz 		if (chg < 0)
4259b5cec28dSMike Kravetz 			return chg;
4260b5cec28dSMike Kravetz 	}
4261b5cec28dSMike Kravetz 
426245c682a6SKen Chen 	spin_lock(&inode->i_lock);
4263e4c6f8beSEric Sandeen 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
426445c682a6SKen Chen 	spin_unlock(&inode->i_lock);
426545c682a6SKen Chen 
42661c5ecae3SMike Kravetz 	/*
42671c5ecae3SMike Kravetz 	 * If the subpool has a minimum size, the number of global
42681c5ecae3SMike Kravetz 	 * reservations to be released may be adjusted.
42691c5ecae3SMike Kravetz 	 */
42701c5ecae3SMike Kravetz 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
42711c5ecae3SMike Kravetz 	hugetlb_acct_memory(h, -gbl_reserve);
4272b5cec28dSMike Kravetz 
4273b5cec28dSMike Kravetz 	return 0;
4274a43a8c39SChen, Kenneth W }
427593f70f90SNaoya Horiguchi 
42763212b535SSteve Capper #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
42773212b535SSteve Capper static unsigned long page_table_shareable(struct vm_area_struct *svma,
42783212b535SSteve Capper 				struct vm_area_struct *vma,
42793212b535SSteve Capper 				unsigned long addr, pgoff_t idx)
42803212b535SSteve Capper {
42813212b535SSteve Capper 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
42823212b535SSteve Capper 				svma->vm_start;
42833212b535SSteve Capper 	unsigned long sbase = saddr & PUD_MASK;
42843212b535SSteve Capper 	unsigned long s_end = sbase + PUD_SIZE;
42853212b535SSteve Capper 
42863212b535SSteve Capper 	/* Allow segments to share if only one is marked locked */
4287de60f5f1SEric B Munson 	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4288de60f5f1SEric B Munson 	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
42893212b535SSteve Capper 
42903212b535SSteve Capper 	/*
42913212b535SSteve Capper 	 * match the virtual addresses, permission and the alignment of the
42923212b535SSteve Capper 	 * page table page.
42933212b535SSteve Capper 	 */
42943212b535SSteve Capper 	if (pmd_index(addr) != pmd_index(saddr) ||
42953212b535SSteve Capper 	    vm_flags != svm_flags ||
42963212b535SSteve Capper 	    sbase < svma->vm_start || svma->vm_end < s_end)
42973212b535SSteve Capper 		return 0;
42983212b535SSteve Capper 
42993212b535SSteve Capper 	return saddr;
43003212b535SSteve Capper }
43013212b535SSteve Capper 
430231aafb45SNicholas Krause static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
43033212b535SSteve Capper {
43043212b535SSteve Capper 	unsigned long base = addr & PUD_MASK;
43053212b535SSteve Capper 	unsigned long end = base + PUD_SIZE;
43063212b535SSteve Capper 
43073212b535SSteve Capper 	/*
43083212b535SSteve Capper 	 * check on proper vm_flags and page table alignment
43093212b535SSteve Capper 	 */
43103212b535SSteve Capper 	if (vma->vm_flags & VM_MAYSHARE &&
43113212b535SSteve Capper 	    vma->vm_start <= base && end <= vma->vm_end)
431231aafb45SNicholas Krause 		return true;
431331aafb45SNicholas Krause 	return false;
43143212b535SSteve Capper }
43153212b535SSteve Capper 
43163212b535SSteve Capper /*
43173212b535SSteve Capper  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
43183212b535SSteve Capper  * and returns the corresponding pte. While this is not necessary for the
43193212b535SSteve Capper  * !shared pmd case because we can allocate the pmd later as well, it makes the
43203212b535SSteve Capper  * code much cleaner. pmd allocation is essential for the shared case because
4321c8c06efaSDavidlohr Bueso  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
43223212b535SSteve Capper  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
43233212b535SSteve Capper  * bad pmd for sharing.
43243212b535SSteve Capper  */
43253212b535SSteve Capper pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
43263212b535SSteve Capper {
43273212b535SSteve Capper 	struct vm_area_struct *vma = find_vma(mm, addr);
43283212b535SSteve Capper 	struct address_space *mapping = vma->vm_file->f_mapping;
43293212b535SSteve Capper 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
43303212b535SSteve Capper 			vma->vm_pgoff;
43313212b535SSteve Capper 	struct vm_area_struct *svma;
43323212b535SSteve Capper 	unsigned long saddr;
43333212b535SSteve Capper 	pte_t *spte = NULL;
43343212b535SSteve Capper 	pte_t *pte;
4335cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
43363212b535SSteve Capper 
43373212b535SSteve Capper 	if (!vma_shareable(vma, addr))
43383212b535SSteve Capper 		return (pte_t *)pmd_alloc(mm, pud, addr);
43393212b535SSteve Capper 
434083cde9e8SDavidlohr Bueso 	i_mmap_lock_write(mapping);
43413212b535SSteve Capper 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
43423212b535SSteve Capper 		if (svma == vma)
43433212b535SSteve Capper 			continue;
43443212b535SSteve Capper 
43453212b535SSteve Capper 		saddr = page_table_shareable(svma, vma, addr, idx);
43463212b535SSteve Capper 		if (saddr) {
43473212b535SSteve Capper 			spte = huge_pte_offset(svma->vm_mm, saddr);
43483212b535SSteve Capper 			if (spte) {
43493212b535SSteve Capper 				get_page(virt_to_page(spte));
43503212b535SSteve Capper 				break;
43513212b535SSteve Capper 			}
43523212b535SSteve Capper 		}
43533212b535SSteve Capper 	}
43543212b535SSteve Capper 
43553212b535SSteve Capper 	if (!spte)
43563212b535SSteve Capper 		goto out;
43573212b535SSteve Capper 
43588bea8052SAneesh Kumar K.V 	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
4359dc6c9a35SKirill A. Shutemov 	if (pud_none(*pud)) {
43603212b535SSteve Capper 		pud_populate(mm, pud,
43613212b535SSteve Capper 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
4362c17b1f42SKirill A. Shutemov 		mm_inc_nr_pmds(mm);
4363dc6c9a35SKirill A. Shutemov 	} else {
43643212b535SSteve Capper 		put_page(virt_to_page(spte));
4365dc6c9a35SKirill A. Shutemov 	}
4366cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
43673212b535SSteve Capper out:
43683212b535SSteve Capper 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
436983cde9e8SDavidlohr Bueso 	i_mmap_unlock_write(mapping);
43703212b535SSteve Capper 	return pte;
43713212b535SSteve Capper }
43723212b535SSteve Capper 
43733212b535SSteve Capper /*
43743212b535SSteve Capper  * unmap huge page backed by shared pte.
43753212b535SSteve Capper  *
43763212b535SSteve Capper  * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
43773212b535SSteve Capper  * indicated by page_count > 1, unmap is achieved by clearing pud and
43783212b535SSteve Capper  * decrementing the ref count. If count == 1, the pte page is not shared.
43793212b535SSteve Capper  *
4380cb900f41SKirill A. Shutemov  * called with page table lock held.
43813212b535SSteve Capper  *
43823212b535SSteve Capper  * returns: 1 successfully unmapped a shared pte page
43833212b535SSteve Capper  *	    0 the underlying pte page is not shared, or it is the last user
43843212b535SSteve Capper  */
43853212b535SSteve Capper int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
43863212b535SSteve Capper {
43873212b535SSteve Capper 	pgd_t *pgd = pgd_offset(mm, *addr);
43883212b535SSteve Capper 	pud_t *pud = pud_offset(pgd, *addr);
43893212b535SSteve Capper 
43903212b535SSteve Capper 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
43913212b535SSteve Capper 	if (page_count(virt_to_page(ptep)) == 1)
43923212b535SSteve Capper 		return 0;
43933212b535SSteve Capper 
43943212b535SSteve Capper 	pud_clear(pud);
43953212b535SSteve Capper 	put_page(virt_to_page(ptep));
4396dc6c9a35SKirill A. Shutemov 	mm_dec_nr_pmds(mm);
43973212b535SSteve Capper 	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
43983212b535SSteve Capper 	return 1;
43993212b535SSteve Capper }
44009e5fc74cSSteve Capper #define want_pmd_share()	(1)
44019e5fc74cSSteve Capper #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
44029e5fc74cSSteve Capper pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
44039e5fc74cSSteve Capper {
44049e5fc74cSSteve Capper 	return NULL;
44059e5fc74cSSteve Capper }
4406e81f2d22SZhang Zhen 
4407e81f2d22SZhang Zhen int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4408e81f2d22SZhang Zhen {
4409e81f2d22SZhang Zhen 	return 0;
4410e81f2d22SZhang Zhen }
44119e5fc74cSSteve Capper #define want_pmd_share()	(0)
44123212b535SSteve Capper #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
44133212b535SSteve Capper 
44149e5fc74cSSteve Capper #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
44159e5fc74cSSteve Capper pte_t *huge_pte_alloc(struct mm_struct *mm,
44169e5fc74cSSteve Capper 			unsigned long addr, unsigned long sz)
44179e5fc74cSSteve Capper {
44189e5fc74cSSteve Capper 	pgd_t *pgd;
44199e5fc74cSSteve Capper 	pud_t *pud;
44209e5fc74cSSteve Capper 	pte_t *pte = NULL;
44219e5fc74cSSteve Capper 
44229e5fc74cSSteve Capper 	pgd = pgd_offset(mm, addr);
44239e5fc74cSSteve Capper 	pud = pud_alloc(mm, pgd, addr);
44249e5fc74cSSteve Capper 	if (pud) {
44259e5fc74cSSteve Capper 		if (sz == PUD_SIZE) {
44269e5fc74cSSteve Capper 			pte = (pte_t *)pud;
44279e5fc74cSSteve Capper 		} else {
44289e5fc74cSSteve Capper 			BUG_ON(sz != PMD_SIZE);
44299e5fc74cSSteve Capper 			if (want_pmd_share() && pud_none(*pud))
44309e5fc74cSSteve Capper 				pte = huge_pmd_share(mm, addr, pud);
44319e5fc74cSSteve Capper 			else
44329e5fc74cSSteve Capper 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
44339e5fc74cSSteve Capper 		}
44349e5fc74cSSteve Capper 	}
44354e666314SMichal Hocko 	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
44369e5fc74cSSteve Capper 
44379e5fc74cSSteve Capper 	return pte;
44389e5fc74cSSteve Capper }
44399e5fc74cSSteve Capper 
44409e5fc74cSSteve Capper pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
44419e5fc74cSSteve Capper {
44429e5fc74cSSteve Capper 	pgd_t *pgd;
44439e5fc74cSSteve Capper 	pud_t *pud;
44449e5fc74cSSteve Capper 	pmd_t *pmd = NULL;
44459e5fc74cSSteve Capper 
44469e5fc74cSSteve Capper 	pgd = pgd_offset(mm, addr);
44479e5fc74cSSteve Capper 	if (pgd_present(*pgd)) {
44489e5fc74cSSteve Capper 		pud = pud_offset(pgd, addr);
44499e5fc74cSSteve Capper 		if (pud_present(*pud)) {
44509e5fc74cSSteve Capper 			if (pud_huge(*pud))
44519e5fc74cSSteve Capper 				return (pte_t *)pud;
44529e5fc74cSSteve Capper 			pmd = pmd_offset(pud, addr);
44539e5fc74cSSteve Capper 		}
44549e5fc74cSSteve Capper 	}
44559e5fc74cSSteve Capper 	return (pte_t *) pmd;
44569e5fc74cSSteve Capper }
44579e5fc74cSSteve Capper 
445861f77edaSNaoya Horiguchi #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
445961f77edaSNaoya Horiguchi 
446061f77edaSNaoya Horiguchi /*
446161f77edaSNaoya Horiguchi  * These functions are overwritable if your architecture needs its own
446261f77edaSNaoya Horiguchi  * behavior.
446361f77edaSNaoya Horiguchi  */
446461f77edaSNaoya Horiguchi struct page * __weak
446561f77edaSNaoya Horiguchi follow_huge_addr(struct mm_struct *mm, unsigned long address,
446661f77edaSNaoya Horiguchi 			      int write)
446761f77edaSNaoya Horiguchi {
446861f77edaSNaoya Horiguchi 	return ERR_PTR(-EINVAL);
446961f77edaSNaoya Horiguchi }
447061f77edaSNaoya Horiguchi 
447161f77edaSNaoya Horiguchi struct page * __weak
44729e5fc74cSSteve Capper follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4473e66f17ffSNaoya Horiguchi 		pmd_t *pmd, int flags)
44749e5fc74cSSteve Capper {
4475e66f17ffSNaoya Horiguchi 	struct page *page = NULL;
4476e66f17ffSNaoya Horiguchi 	spinlock_t *ptl;
4477e66f17ffSNaoya Horiguchi retry:
4478e66f17ffSNaoya Horiguchi 	ptl = pmd_lockptr(mm, pmd);
4479e66f17ffSNaoya Horiguchi 	spin_lock(ptl);
4480e66f17ffSNaoya Horiguchi 	/*
4481e66f17ffSNaoya Horiguchi 	 * make sure that the address range covered by this pmd is not
4482e66f17ffSNaoya Horiguchi 	 * unmapped from other threads.
4483e66f17ffSNaoya Horiguchi 	 */
4484e66f17ffSNaoya Horiguchi 	if (!pmd_huge(*pmd))
4485e66f17ffSNaoya Horiguchi 		goto out;
4486e66f17ffSNaoya Horiguchi 	if (pmd_present(*pmd)) {
448797534127SGerald Schaefer 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4488e66f17ffSNaoya Horiguchi 		if (flags & FOLL_GET)
4489e66f17ffSNaoya Horiguchi 			get_page(page);
4490e66f17ffSNaoya Horiguchi 	} else {
4491e66f17ffSNaoya Horiguchi 		if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
4492e66f17ffSNaoya Horiguchi 			spin_unlock(ptl);
4493e66f17ffSNaoya Horiguchi 			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
4494e66f17ffSNaoya Horiguchi 			goto retry;
4495e66f17ffSNaoya Horiguchi 		}
4496e66f17ffSNaoya Horiguchi 		/*
4497e66f17ffSNaoya Horiguchi 		 * hwpoisoned entry is treated as no_page_table in
4498e66f17ffSNaoya Horiguchi 		 * follow_page_mask().
4499e66f17ffSNaoya Horiguchi 		 */
4500e66f17ffSNaoya Horiguchi 	}
4501e66f17ffSNaoya Horiguchi out:
4502e66f17ffSNaoya Horiguchi 	spin_unlock(ptl);
45039e5fc74cSSteve Capper 	return page;
45049e5fc74cSSteve Capper }
45059e5fc74cSSteve Capper 
450661f77edaSNaoya Horiguchi struct page * __weak
45079e5fc74cSSteve Capper follow_huge_pud(struct mm_struct *mm, unsigned long address,
4508e66f17ffSNaoya Horiguchi 		pud_t *pud, int flags)
45099e5fc74cSSteve Capper {
4510e66f17ffSNaoya Horiguchi 	if (flags & FOLL_GET)
4511e66f17ffSNaoya Horiguchi 		return NULL;
45129e5fc74cSSteve Capper 
4513e66f17ffSNaoya Horiguchi 	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
45149e5fc74cSSteve Capper }
45159e5fc74cSSteve Capper 
4516d5bd9106SAndi Kleen #ifdef CONFIG_MEMORY_FAILURE
4517d5bd9106SAndi Kleen 
451893f70f90SNaoya Horiguchi /*
451993f70f90SNaoya Horiguchi  * This function is called from memory failure code.
452093f70f90SNaoya Horiguchi  */
45216de2b1aaSNaoya Horiguchi int dequeue_hwpoisoned_huge_page(struct page *hpage)
452293f70f90SNaoya Horiguchi {
452393f70f90SNaoya Horiguchi 	struct hstate *h = page_hstate(hpage);
452493f70f90SNaoya Horiguchi 	int nid = page_to_nid(hpage);
45256de2b1aaSNaoya Horiguchi 	int ret = -EBUSY;
452693f70f90SNaoya Horiguchi 
452793f70f90SNaoya Horiguchi 	spin_lock(&hugetlb_lock);
45287e1f049eSNaoya Horiguchi 	/*
45297e1f049eSNaoya Horiguchi 	 * Just checking !page_huge_active is not enough, because that could be
45307e1f049eSNaoya Horiguchi 	 * an isolated/hwpoisoned hugepage (which have >0 refcount).
45317e1f049eSNaoya Horiguchi 	 */
45327e1f049eSNaoya Horiguchi 	if (!page_huge_active(hpage) && !page_count(hpage)) {
453356f2fb14SNaoya Horiguchi 		/*
453456f2fb14SNaoya Horiguchi 		 * Hwpoisoned hugepage isn't linked to activelist or freelist,
453556f2fb14SNaoya Horiguchi 		 * but dangling hpage->lru can trigger list-debug warnings
453656f2fb14SNaoya Horiguchi 		 * (this happens when we call unpoison_memory() on it),
453756f2fb14SNaoya Horiguchi 		 * so let it point to itself with list_del_init().
453856f2fb14SNaoya Horiguchi 		 */
453956f2fb14SNaoya Horiguchi 		list_del_init(&hpage->lru);
45408c6c2ecbSNaoya Horiguchi 		set_page_refcounted(hpage);
454193f70f90SNaoya Horiguchi 		h->free_huge_pages--;
454293f70f90SNaoya Horiguchi 		h->free_huge_pages_node[nid]--;
45436de2b1aaSNaoya Horiguchi 		ret = 0;
454493f70f90SNaoya Horiguchi 	}
45456de2b1aaSNaoya Horiguchi 	spin_unlock(&hugetlb_lock);
45466de2b1aaSNaoya Horiguchi 	return ret;
45476de2b1aaSNaoya Horiguchi }
45486de2b1aaSNaoya Horiguchi #endif
454931caf665SNaoya Horiguchi 
455031caf665SNaoya Horiguchi bool isolate_huge_page(struct page *page, struct list_head *list)
455131caf665SNaoya Horiguchi {
4552bcc54222SNaoya Horiguchi 	bool ret = true;
4553bcc54222SNaoya Horiguchi 
4554309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageHead(page), page);
455531caf665SNaoya Horiguchi 	spin_lock(&hugetlb_lock);
4556bcc54222SNaoya Horiguchi 	if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4557bcc54222SNaoya Horiguchi 		ret = false;
4558bcc54222SNaoya Horiguchi 		goto unlock;
4559bcc54222SNaoya Horiguchi 	}
4560bcc54222SNaoya Horiguchi 	clear_page_huge_active(page);
456131caf665SNaoya Horiguchi 	list_move_tail(&page->lru, list);
4562bcc54222SNaoya Horiguchi unlock:
456331caf665SNaoya Horiguchi 	spin_unlock(&hugetlb_lock);
4564bcc54222SNaoya Horiguchi 	return ret;
456531caf665SNaoya Horiguchi }
456631caf665SNaoya Horiguchi 
456731caf665SNaoya Horiguchi void putback_active_hugepage(struct page *page)
456831caf665SNaoya Horiguchi {
4569309381feSSasha Levin 	VM_BUG_ON_PAGE(!PageHead(page), page);
457031caf665SNaoya Horiguchi 	spin_lock(&hugetlb_lock);
4571bcc54222SNaoya Horiguchi 	set_page_huge_active(page);
457231caf665SNaoya Horiguchi 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
457331caf665SNaoya Horiguchi 	spin_unlock(&hugetlb_lock);
457431caf665SNaoya Horiguchi 	put_page(page);
457531caf665SNaoya Horiguchi }
4576