xref: /openbmc/linux/mm/hugetlb.c (revision 60dfaad6)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Generic hugetlb support.
46d49e352SNadia Yvette Chambers  * (C) Nadia Yvette Chambers, April 2004
51da177e4SLinus Torvalds  */
61da177e4SLinus Torvalds #include <linux/list.h>
71da177e4SLinus Torvalds #include <linux/init.h>
81da177e4SLinus Torvalds #include <linux/mm.h>
9e1759c21SAlexey Dobriyan #include <linux/seq_file.h>
101da177e4SLinus Torvalds #include <linux/sysctl.h>
111da177e4SLinus Torvalds #include <linux/highmem.h>
12cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h>
131da177e4SLinus Torvalds #include <linux/nodemask.h>
1463551ae0SDavid Gibson #include <linux/pagemap.h>
155da7ca86SChristoph Lameter #include <linux/mempolicy.h>
163b32123dSGideon Israel Dsouza #include <linux/compiler.h>
17aea47ff3SChristoph Lameter #include <linux/cpuset.h>
183935baa9SDavid Gibson #include <linux/mutex.h>
1997ad1087SMike Rapoport #include <linux/memblock.h>
20a3437870SNishanth Aravamudan #include <linux/sysfs.h>
215a0e3ad6STejun Heo #include <linux/slab.h>
22bbe88753SJoonsoo Kim #include <linux/sched/mm.h>
2363489f8eSMike Kravetz #include <linux/mmdebug.h>
24174cd4b1SIngo Molnar #include <linux/sched/signal.h>
250fe6e20bSNaoya Horiguchi #include <linux/rmap.h>
26c6247f72SMatthew Wilcox #include <linux/string_helpers.h>
27fd6a03edSNaoya Horiguchi #include <linux/swap.h>
28fd6a03edSNaoya Horiguchi #include <linux/swapops.h>
298382d914SDavidlohr Bueso #include <linux/jhash.h>
3098fa15f3SAnshuman Khandual #include <linux/numa.h>
31c77c0a8aSWaiman Long #include <linux/llist.h>
32cf11e85fSRoman Gushchin #include <linux/cma.h>
338cc5fcbbSMina Almasry #include <linux/migrate.h>
34f9317f77SMike Kravetz #include <linux/nospec.h>
35d6606683SLinus Torvalds 
3663551ae0SDavid Gibson #include <asm/page.h>
37ca15ca40SMike Rapoport #include <asm/pgalloc.h>
3824669e58SAneesh Kumar K.V #include <asm/tlb.h>
3963551ae0SDavid Gibson 
4024669e58SAneesh Kumar K.V #include <linux/io.h>
4163551ae0SDavid Gibson #include <linux/hugetlb.h>
429dd540e2SAneesh Kumar K.V #include <linux/hugetlb_cgroup.h>
439a305230SLee Schermerhorn #include <linux/node.h>
44ab5ac90aSMichal Hocko #include <linux/page_owner.h>
457835e98bSNick Piggin #include "internal.h"
46f41f2ed4SMuchun Song #include "hugetlb_vmemmap.h"
471da177e4SLinus Torvalds 
48c3f38a38SAneesh Kumar K.V int hugetlb_max_hstate __read_mostly;
49e5ff2159SAndi Kleen unsigned int default_hstate_idx;
50e5ff2159SAndi Kleen struct hstate hstates[HUGE_MAX_HSTATE];
51cf11e85fSRoman Gushchin 
52dbda8feaSBarry Song #ifdef CONFIG_CMA
53cf11e85fSRoman Gushchin static struct cma *hugetlb_cma[MAX_NUMNODES];
5438e719abSBaolin Wang static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
55a01f4390SMike Kravetz static bool hugetlb_cma_page(struct page *page, unsigned int order)
56a01f4390SMike Kravetz {
57a01f4390SMike Kravetz 	return cma_pages_valid(hugetlb_cma[page_to_nid(page)], page,
58a01f4390SMike Kravetz 				1 << order);
59a01f4390SMike Kravetz }
60a01f4390SMike Kravetz #else
61a01f4390SMike Kravetz static bool hugetlb_cma_page(struct page *page, unsigned int order)
62a01f4390SMike Kravetz {
63a01f4390SMike Kravetz 	return false;
64a01f4390SMike Kravetz }
65dbda8feaSBarry Song #endif
66dbda8feaSBarry Song static unsigned long hugetlb_cma_size __initdata;
67cf11e85fSRoman Gushchin 
68641844f5SNaoya Horiguchi /*
69641844f5SNaoya Horiguchi  * Minimum page order among possible hugepage sizes, set to a proper value
70641844f5SNaoya Horiguchi  * at boot time.
71641844f5SNaoya Horiguchi  */
72641844f5SNaoya Horiguchi static unsigned int minimum_order __read_mostly = UINT_MAX;
73e5ff2159SAndi Kleen 
7453ba51d2SJon Tollefson __initdata LIST_HEAD(huge_boot_pages);
7553ba51d2SJon Tollefson 
76e5ff2159SAndi Kleen /* for command line parsing */
77e5ff2159SAndi Kleen static struct hstate * __initdata parsed_hstate;
78e5ff2159SAndi Kleen static unsigned long __initdata default_hstate_max_huge_pages;
799fee021dSVaishali Thakkar static bool __initdata parsed_valid_hugepagesz = true;
80282f4214SMike Kravetz static bool __initdata parsed_default_hugepagesz;
81b5389086SZhenguo Yao static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
82e5ff2159SAndi Kleen 
833935baa9SDavid Gibson /*
8431caf665SNaoya Horiguchi  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
8531caf665SNaoya Horiguchi  * free_huge_pages, and surplus_huge_pages.
863935baa9SDavid Gibson  */
87c3f38a38SAneesh Kumar K.V DEFINE_SPINLOCK(hugetlb_lock);
880bd0f9fbSEric Paris 
898382d914SDavidlohr Bueso /*
908382d914SDavidlohr Bueso  * Serializes faults on the same logical page.  This is used to
918382d914SDavidlohr Bueso  * prevent spurious OOMs when the hugepage pool is fully utilized.
928382d914SDavidlohr Bueso  */
938382d914SDavidlohr Bueso static int num_fault_mutexes;
94c672c7f2SMike Kravetz struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
958382d914SDavidlohr Bueso 
967ca02d0aSMike Kravetz /* Forward declaration */
977ca02d0aSMike Kravetz static int hugetlb_acct_memory(struct hstate *h, long delta);
987ca02d0aSMike Kravetz 
991d88433bSMiaohe Lin static inline bool subpool_is_free(struct hugepage_subpool *spool)
1001d88433bSMiaohe Lin {
1011d88433bSMiaohe Lin 	if (spool->count)
1021d88433bSMiaohe Lin 		return false;
1031d88433bSMiaohe Lin 	if (spool->max_hpages != -1)
1041d88433bSMiaohe Lin 		return spool->used_hpages == 0;
1051d88433bSMiaohe Lin 	if (spool->min_hpages != -1)
1061d88433bSMiaohe Lin 		return spool->rsv_hpages == spool->min_hpages;
1071d88433bSMiaohe Lin 
1081d88433bSMiaohe Lin 	return true;
1091d88433bSMiaohe Lin }
1101d88433bSMiaohe Lin 
111db71ef79SMike Kravetz static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
112db71ef79SMike Kravetz 						unsigned long irq_flags)
11390481622SDavid Gibson {
114db71ef79SMike Kravetz 	spin_unlock_irqrestore(&spool->lock, irq_flags);
11590481622SDavid Gibson 
11690481622SDavid Gibson 	/* If no pages are used, and no other handles to the subpool
1177c8de358SEthon Paul 	 * remain, give up any reservations based on minimum size and
1187ca02d0aSMike Kravetz 	 * free the subpool */
1191d88433bSMiaohe Lin 	if (subpool_is_free(spool)) {
1207ca02d0aSMike Kravetz 		if (spool->min_hpages != -1)
1217ca02d0aSMike Kravetz 			hugetlb_acct_memory(spool->hstate,
1227ca02d0aSMike Kravetz 						-spool->min_hpages);
12390481622SDavid Gibson 		kfree(spool);
12490481622SDavid Gibson 	}
1257ca02d0aSMike Kravetz }
12690481622SDavid Gibson 
1277ca02d0aSMike Kravetz struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
1287ca02d0aSMike Kravetz 						long min_hpages)
12990481622SDavid Gibson {
13090481622SDavid Gibson 	struct hugepage_subpool *spool;
13190481622SDavid Gibson 
132c6a91820SMike Kravetz 	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
13390481622SDavid Gibson 	if (!spool)
13490481622SDavid Gibson 		return NULL;
13590481622SDavid Gibson 
13690481622SDavid Gibson 	spin_lock_init(&spool->lock);
13790481622SDavid Gibson 	spool->count = 1;
1387ca02d0aSMike Kravetz 	spool->max_hpages = max_hpages;
1397ca02d0aSMike Kravetz 	spool->hstate = h;
1407ca02d0aSMike Kravetz 	spool->min_hpages = min_hpages;
1417ca02d0aSMike Kravetz 
1427ca02d0aSMike Kravetz 	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
1437ca02d0aSMike Kravetz 		kfree(spool);
1447ca02d0aSMike Kravetz 		return NULL;
1457ca02d0aSMike Kravetz 	}
1467ca02d0aSMike Kravetz 	spool->rsv_hpages = min_hpages;
14790481622SDavid Gibson 
14890481622SDavid Gibson 	return spool;
14990481622SDavid Gibson }
15090481622SDavid Gibson 
15190481622SDavid Gibson void hugepage_put_subpool(struct hugepage_subpool *spool)
15290481622SDavid Gibson {
153db71ef79SMike Kravetz 	unsigned long flags;
154db71ef79SMike Kravetz 
155db71ef79SMike Kravetz 	spin_lock_irqsave(&spool->lock, flags);
15690481622SDavid Gibson 	BUG_ON(!spool->count);
15790481622SDavid Gibson 	spool->count--;
158db71ef79SMike Kravetz 	unlock_or_release_subpool(spool, flags);
15990481622SDavid Gibson }
16090481622SDavid Gibson 
1611c5ecae3SMike Kravetz /*
1621c5ecae3SMike Kravetz  * Subpool accounting for allocating and reserving pages.
1631c5ecae3SMike Kravetz  * Return -ENOMEM if there are not enough resources to satisfy the
1649e7ee400SRandy Dunlap  * request.  Otherwise, return the number of pages by which the
1651c5ecae3SMike Kravetz  * global pools must be adjusted (upward).  The returned value may differ
1661c5ecae3SMike Kravetz  * from the passed value (delta) only in the case where
1677c8de358SEthon Paul  * a subpool minimum size must be maintained.
1681c5ecae3SMike Kravetz  */
1691c5ecae3SMike Kravetz static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
17090481622SDavid Gibson 				      long delta)
17190481622SDavid Gibson {
1721c5ecae3SMike Kravetz 	long ret = delta;
17390481622SDavid Gibson 
17490481622SDavid Gibson 	if (!spool)
1751c5ecae3SMike Kravetz 		return ret;
17690481622SDavid Gibson 
177db71ef79SMike Kravetz 	spin_lock_irq(&spool->lock);
17890481622SDavid Gibson 
1791c5ecae3SMike Kravetz 	if (spool->max_hpages != -1) {		/* maximum size accounting */
1801c5ecae3SMike Kravetz 		if ((spool->used_hpages + delta) <= spool->max_hpages)
1811c5ecae3SMike Kravetz 			spool->used_hpages += delta;
1821c5ecae3SMike Kravetz 		else {
1831c5ecae3SMike Kravetz 			ret = -ENOMEM;
1841c5ecae3SMike Kravetz 			goto unlock_ret;
1851c5ecae3SMike Kravetz 		}
1861c5ecae3SMike Kravetz 	}
1871c5ecae3SMike Kravetz 
18809a95e29SMike Kravetz 	/* minimum size accounting */
18909a95e29SMike Kravetz 	if (spool->min_hpages != -1 && spool->rsv_hpages) {
1901c5ecae3SMike Kravetz 		if (delta > spool->rsv_hpages) {
1911c5ecae3SMike Kravetz 			/*
1921c5ecae3SMike Kravetz 			 * Asking for more reserves than those already taken on
1931c5ecae3SMike Kravetz 			 * behalf of subpool.  Return difference.
1941c5ecae3SMike Kravetz 			 */
1951c5ecae3SMike Kravetz 			ret = delta - spool->rsv_hpages;
1961c5ecae3SMike Kravetz 			spool->rsv_hpages = 0;
1971c5ecae3SMike Kravetz 		} else {
1981c5ecae3SMike Kravetz 			ret = 0;	/* reserves already accounted for */
1991c5ecae3SMike Kravetz 			spool->rsv_hpages -= delta;
2001c5ecae3SMike Kravetz 		}
2011c5ecae3SMike Kravetz 	}
2021c5ecae3SMike Kravetz 
2031c5ecae3SMike Kravetz unlock_ret:
204db71ef79SMike Kravetz 	spin_unlock_irq(&spool->lock);
20590481622SDavid Gibson 	return ret;
20690481622SDavid Gibson }
20790481622SDavid Gibson 
2081c5ecae3SMike Kravetz /*
2091c5ecae3SMike Kravetz  * Subpool accounting for freeing and unreserving pages.
2101c5ecae3SMike Kravetz  * Return the number of global page reservations that must be dropped.
2111c5ecae3SMike Kravetz  * The return value may differ from the passed value (delta) only
2121c5ecae3SMike Kravetz  * in the case where a subpool minimum size must be maintained.
2131c5ecae3SMike Kravetz  */
2141c5ecae3SMike Kravetz static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
21590481622SDavid Gibson 				       long delta)
21690481622SDavid Gibson {
2171c5ecae3SMike Kravetz 	long ret = delta;
218db71ef79SMike Kravetz 	unsigned long flags;
2191c5ecae3SMike Kravetz 
22090481622SDavid Gibson 	if (!spool)
2211c5ecae3SMike Kravetz 		return delta;
22290481622SDavid Gibson 
223db71ef79SMike Kravetz 	spin_lock_irqsave(&spool->lock, flags);
2241c5ecae3SMike Kravetz 
2251c5ecae3SMike Kravetz 	if (spool->max_hpages != -1)		/* maximum size accounting */
22690481622SDavid Gibson 		spool->used_hpages -= delta;
2271c5ecae3SMike Kravetz 
22809a95e29SMike Kravetz 	 /* minimum size accounting */
22909a95e29SMike Kravetz 	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
2301c5ecae3SMike Kravetz 		if (spool->rsv_hpages + delta <= spool->min_hpages)
2311c5ecae3SMike Kravetz 			ret = 0;
2321c5ecae3SMike Kravetz 		else
2331c5ecae3SMike Kravetz 			ret = spool->rsv_hpages + delta - spool->min_hpages;
2341c5ecae3SMike Kravetz 
2351c5ecae3SMike Kravetz 		spool->rsv_hpages += delta;
2361c5ecae3SMike Kravetz 		if (spool->rsv_hpages > spool->min_hpages)
2371c5ecae3SMike Kravetz 			spool->rsv_hpages = spool->min_hpages;
2381c5ecae3SMike Kravetz 	}
2391c5ecae3SMike Kravetz 
2401c5ecae3SMike Kravetz 	/*
2411c5ecae3SMike Kravetz 	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
2421c5ecae3SMike Kravetz 	 * quota reference, free it now.
2431c5ecae3SMike Kravetz 	 */
244db71ef79SMike Kravetz 	unlock_or_release_subpool(spool, flags);
2451c5ecae3SMike Kravetz 
2461c5ecae3SMike Kravetz 	return ret;
24790481622SDavid Gibson }
24890481622SDavid Gibson 
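/*
 * [Editor's note -- illustrative addition, not part of the original file.]
 * Worked example of the two helpers above, assuming a subpool created with
 * min_hpages = 4 and no maximum (max_hpages == -1):
 *
 *   hugepage_new_subpool(h, -1, 4)     reserves 4 global pages up front,
 *                                      rsv_hpages = 4
 *   hugepage_subpool_get_pages(sp, 6)  returns 2, rsv_hpages = 0
 *                                      (4 pages come out of the subpool
 *                                       reserve; the caller still has to
 *                                       charge 2 against the global pools)
 *   hugepage_subpool_put_pages(sp, 6)  returns 2, rsv_hpages = 4
 *                                      (the minimum reserve is topped back
 *                                       up; only 2 global reservations are
 *                                       actually dropped)
 */
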
24990481622SDavid Gibson static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
25090481622SDavid Gibson {
25190481622SDavid Gibson 	return HUGETLBFS_SB(inode->i_sb)->spool;
25290481622SDavid Gibson }
25390481622SDavid Gibson 
25490481622SDavid Gibson static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
25590481622SDavid Gibson {
256496ad9aaSAl Viro 	return subpool_inode(file_inode(vma->vm_file));
25790481622SDavid Gibson }
25890481622SDavid Gibson 
2590db9d74eSMina Almasry /* Helper that removes a struct file_region from the resv_map cache and returns
2600db9d74eSMina Almasry  * it for use.
2610db9d74eSMina Almasry  */
2620db9d74eSMina Almasry static struct file_region *
2630db9d74eSMina Almasry get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
2640db9d74eSMina Almasry {
2650db9d74eSMina Almasry 	struct file_region *nrg = NULL;
2660db9d74eSMina Almasry 
2670db9d74eSMina Almasry 	VM_BUG_ON(resv->region_cache_count <= 0);
2680db9d74eSMina Almasry 
2690db9d74eSMina Almasry 	resv->region_cache_count--;
2700db9d74eSMina Almasry 	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
2710db9d74eSMina Almasry 	list_del(&nrg->link);
2720db9d74eSMina Almasry 
2730db9d74eSMina Almasry 	nrg->from = from;
2740db9d74eSMina Almasry 	nrg->to = to;
2750db9d74eSMina Almasry 
2760db9d74eSMina Almasry 	return nrg;
2770db9d74eSMina Almasry }
2780db9d74eSMina Almasry 
279075a61d0SMina Almasry static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
280075a61d0SMina Almasry 					      struct file_region *rg)
281075a61d0SMina Almasry {
282075a61d0SMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
283075a61d0SMina Almasry 	nrg->reservation_counter = rg->reservation_counter;
284075a61d0SMina Almasry 	nrg->css = rg->css;
285075a61d0SMina Almasry 	if (rg->css)
286075a61d0SMina Almasry 		css_get(rg->css);
287075a61d0SMina Almasry #endif
288075a61d0SMina Almasry }
289075a61d0SMina Almasry 
290075a61d0SMina Almasry /* Helper that records hugetlb_cgroup uncharge info. */
291075a61d0SMina Almasry static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
292075a61d0SMina Almasry 						struct hstate *h,
293075a61d0SMina Almasry 						struct resv_map *resv,
294075a61d0SMina Almasry 						struct file_region *nrg)
295075a61d0SMina Almasry {
296075a61d0SMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
297075a61d0SMina Almasry 	if (h_cg) {
298075a61d0SMina Almasry 		nrg->reservation_counter =
299075a61d0SMina Almasry 			&h_cg->rsvd_hugepage[hstate_index(h)];
300075a61d0SMina Almasry 		nrg->css = &h_cg->css;
301d85aecf2SMiaohe Lin 		/*
302d85aecf2SMiaohe Lin 		 * The caller will hold exactly one h_cg->css reference for the
303d85aecf2SMiaohe Lin 		 * whole contiguous reservation region. But this area might be
304d85aecf2SMiaohe Lin 		 * scattered when some file_regions already reside in
305d85aecf2SMiaohe Lin 		 * it. As a result, many file_regions may share only one css
306d85aecf2SMiaohe Lin 		 * reference. In order to ensure that one file_region must hold
307d85aecf2SMiaohe Lin 		 * exactly one h_cg->css reference, we should do css_get for
308d85aecf2SMiaohe Lin 		 * each file_region and leave the reference held by caller
309d85aecf2SMiaohe Lin 		 * untouched.
310d85aecf2SMiaohe Lin 		 */
311d85aecf2SMiaohe Lin 		css_get(&h_cg->css);
312075a61d0SMina Almasry 		if (!resv->pages_per_hpage)
313075a61d0SMina Almasry 			resv->pages_per_hpage = pages_per_huge_page(h);
314075a61d0SMina Almasry 		/* pages_per_hpage should be the same for all entries in
315075a61d0SMina Almasry 		 * a resv_map.
316075a61d0SMina Almasry 		 */
317075a61d0SMina Almasry 		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
318075a61d0SMina Almasry 	} else {
319075a61d0SMina Almasry 		nrg->reservation_counter = NULL;
320075a61d0SMina Almasry 		nrg->css = NULL;
321075a61d0SMina Almasry 	}
322075a61d0SMina Almasry #endif
323075a61d0SMina Almasry }
324075a61d0SMina Almasry 
325d85aecf2SMiaohe Lin static void put_uncharge_info(struct file_region *rg)
326d85aecf2SMiaohe Lin {
327d85aecf2SMiaohe Lin #ifdef CONFIG_CGROUP_HUGETLB
328d85aecf2SMiaohe Lin 	if (rg->css)
329d85aecf2SMiaohe Lin 		css_put(rg->css);
330d85aecf2SMiaohe Lin #endif
331d85aecf2SMiaohe Lin }
332d85aecf2SMiaohe Lin 
333a9b3f867SMina Almasry static bool has_same_uncharge_info(struct file_region *rg,
334a9b3f867SMina Almasry 				   struct file_region *org)
335a9b3f867SMina Almasry {
336a9b3f867SMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
3370739eb43SBaolin Wang 	return rg->reservation_counter == org->reservation_counter &&
338a9b3f867SMina Almasry 	       rg->css == org->css;
339a9b3f867SMina Almasry 
340a9b3f867SMina Almasry #else
341a9b3f867SMina Almasry 	return true;
342a9b3f867SMina Almasry #endif
343a9b3f867SMina Almasry }
344a9b3f867SMina Almasry 
345a9b3f867SMina Almasry static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
346a9b3f867SMina Almasry {
347a9b3f867SMina Almasry 	struct file_region *nrg = NULL, *prg = NULL;
348a9b3f867SMina Almasry 
349a9b3f867SMina Almasry 	prg = list_prev_entry(rg, link);
350a9b3f867SMina Almasry 	if (&prg->link != &resv->regions && prg->to == rg->from &&
351a9b3f867SMina Almasry 	    has_same_uncharge_info(prg, rg)) {
352a9b3f867SMina Almasry 		prg->to = rg->to;
353a9b3f867SMina Almasry 
354a9b3f867SMina Almasry 		list_del(&rg->link);
355d85aecf2SMiaohe Lin 		put_uncharge_info(rg);
356a9b3f867SMina Almasry 		kfree(rg);
357a9b3f867SMina Almasry 
3587db5e7b6SWei Yang 		rg = prg;
359a9b3f867SMina Almasry 	}
360a9b3f867SMina Almasry 
361a9b3f867SMina Almasry 	nrg = list_next_entry(rg, link);
362a9b3f867SMina Almasry 	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
363a9b3f867SMina Almasry 	    has_same_uncharge_info(nrg, rg)) {
364a9b3f867SMina Almasry 		nrg->from = rg->from;
365a9b3f867SMina Almasry 
366a9b3f867SMina Almasry 		list_del(&rg->link);
367d85aecf2SMiaohe Lin 		put_uncharge_info(rg);
368a9b3f867SMina Almasry 		kfree(rg);
369a9b3f867SMina Almasry 	}
370a9b3f867SMina Almasry }
371a9b3f867SMina Almasry 
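/*
 * [Editor's note -- illustrative addition, not part of the original file.]
 * Example: with existing regions [0, 2) and [4, 6), inserting [2, 4) with
 * matching uncharge info lets coalesce_file_region() merge backwards into
 * [0, 4) and then forwards, leaving a single [0, 6) entry and dropping the
 * css references of the two absorbed file_regions before freeing them.
 */
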
3722103cf9cSPeter Xu static inline long
37384448c8eSJakob Koschel hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
3742103cf9cSPeter Xu 		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
3752103cf9cSPeter Xu 		     long *regions_needed)
3762103cf9cSPeter Xu {
3772103cf9cSPeter Xu 	struct file_region *nrg;
3782103cf9cSPeter Xu 
3792103cf9cSPeter Xu 	if (!regions_needed) {
3802103cf9cSPeter Xu 		nrg = get_file_region_entry_from_cache(map, from, to);
3812103cf9cSPeter Xu 		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
38284448c8eSJakob Koschel 		list_add(&nrg->link, rg);
3832103cf9cSPeter Xu 		coalesce_file_region(map, nrg);
3842103cf9cSPeter Xu 	} else
3852103cf9cSPeter Xu 		*regions_needed += 1;
3862103cf9cSPeter Xu 
3872103cf9cSPeter Xu 	return to - from;
3882103cf9cSPeter Xu }
3892103cf9cSPeter Xu 
390972a3da3SWei Yang /*
391972a3da3SWei Yang  * Must be called with resv->lock held.
392972a3da3SWei Yang  *
393972a3da3SWei Yang  * Calling this with regions_needed != NULL will count the number of pages
394972a3da3SWei Yang  * to be added but will not modify the linked list. And regions_needed will
395972a3da3SWei Yang  * indicate the number of file_regions needed in the cache to carry out the
396972a3da3SWei Yang  * addition of regions for this range.
397d75c6af9SMina Almasry  */
398d75c6af9SMina Almasry static long add_reservation_in_range(struct resv_map *resv, long f, long t,
399075a61d0SMina Almasry 				     struct hugetlb_cgroup *h_cg,
400972a3da3SWei Yang 				     struct hstate *h, long *regions_needed)
401d75c6af9SMina Almasry {
4020db9d74eSMina Almasry 	long add = 0;
403d75c6af9SMina Almasry 	struct list_head *head = &resv->regions;
4040db9d74eSMina Almasry 	long last_accounted_offset = f;
40584448c8eSJakob Koschel 	struct file_region *iter, *trg = NULL;
40684448c8eSJakob Koschel 	struct list_head *rg = NULL;
407d75c6af9SMina Almasry 
4080db9d74eSMina Almasry 	if (regions_needed)
4090db9d74eSMina Almasry 		*regions_needed = 0;
410d75c6af9SMina Almasry 
4110db9d74eSMina Almasry 	/* In this loop, we essentially handle an entry for the range
41284448c8eSJakob Koschel 	 * [last_accounted_offset, iter->from), at every iteration, with some
4130db9d74eSMina Almasry 	 * bounds checking.
4140db9d74eSMina Almasry 	 */
41584448c8eSJakob Koschel 	list_for_each_entry_safe(iter, trg, head, link) {
4160db9d74eSMina Almasry 		/* Skip irrelevant regions that start before our range. */
41784448c8eSJakob Koschel 		if (iter->from < f) {
4180db9d74eSMina Almasry 			/* If this region ends after the last accounted offset,
4190db9d74eSMina Almasry 			 * then we need to update last_accounted_offset.
4200db9d74eSMina Almasry 			 */
42184448c8eSJakob Koschel 			if (iter->to > last_accounted_offset)
42284448c8eSJakob Koschel 				last_accounted_offset = iter->to;
4230db9d74eSMina Almasry 			continue;
4240db9d74eSMina Almasry 		}
425d75c6af9SMina Almasry 
4260db9d74eSMina Almasry 		/* When we find a region that starts beyond our range, we've
4270db9d74eSMina Almasry 		 * finished.
4280db9d74eSMina Almasry 		 */
42984448c8eSJakob Koschel 		if (iter->from >= t) {
43084448c8eSJakob Koschel 			rg = iter->link.prev;
431d75c6af9SMina Almasry 			break;
43284448c8eSJakob Koschel 		}
433d75c6af9SMina Almasry 
43484448c8eSJakob Koschel 		/* Add an entry for last_accounted_offset -> iter->from, and
4350db9d74eSMina Almasry 		 * update last_accounted_offset.
436d75c6af9SMina Almasry 		 */
43784448c8eSJakob Koschel 		if (iter->from > last_accounted_offset)
43884448c8eSJakob Koschel 			add += hugetlb_resv_map_add(resv, iter->link.prev,
4392103cf9cSPeter Xu 						    last_accounted_offset,
44084448c8eSJakob Koschel 						    iter->from, h, h_cg,
4412103cf9cSPeter Xu 						    regions_needed);
442d75c6af9SMina Almasry 
44384448c8eSJakob Koschel 		last_accounted_offset = iter->to;
4440db9d74eSMina Almasry 	}
4450db9d74eSMina Almasry 
4460db9d74eSMina Almasry 	/* Handle the case where our range extends beyond
4470db9d74eSMina Almasry 	 * last_accounted_offset.
4480db9d74eSMina Almasry 	 */
44984448c8eSJakob Koschel 	if (!rg)
45084448c8eSJakob Koschel 		rg = head->prev;
4512103cf9cSPeter Xu 	if (last_accounted_offset < t)
4522103cf9cSPeter Xu 		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
4532103cf9cSPeter Xu 					    t, h, h_cg, regions_needed);
4540db9d74eSMina Almasry 
4550db9d74eSMina Almasry 	return add;
4560db9d74eSMina Almasry }
4570db9d74eSMina Almasry 
4580db9d74eSMina Almasry /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
4590db9d74eSMina Almasry  */
4600db9d74eSMina Almasry static int allocate_file_region_entries(struct resv_map *resv,
4610db9d74eSMina Almasry 					int regions_needed)
4620db9d74eSMina Almasry 	__must_hold(&resv->lock)
4630db9d74eSMina Almasry {
4640db9d74eSMina Almasry 	struct list_head allocated_regions;
4650db9d74eSMina Almasry 	int to_allocate = 0, i = 0;
4660db9d74eSMina Almasry 	struct file_region *trg = NULL, *rg = NULL;
4670db9d74eSMina Almasry 
4680db9d74eSMina Almasry 	VM_BUG_ON(regions_needed < 0);
4690db9d74eSMina Almasry 
4700db9d74eSMina Almasry 	INIT_LIST_HEAD(&allocated_regions);
4710db9d74eSMina Almasry 
4720db9d74eSMina Almasry 	/*
4730db9d74eSMina Almasry 	 * Check for sufficient descriptors in the cache to accommodate
4740db9d74eSMina Almasry 	 * the number of in progress add operations plus regions_needed.
4750db9d74eSMina Almasry 	 *
4760db9d74eSMina Almasry 	 * This is a while loop because when we drop the lock, some other call
4770db9d74eSMina Almasry 	 * to region_add or region_del may have consumed some region_entries,
4780db9d74eSMina Almasry 	 * so we keep looping here until we finally have enough entries for
4790db9d74eSMina Almasry 	 * (adds_in_progress + regions_needed).
4800db9d74eSMina Almasry 	 */
4810db9d74eSMina Almasry 	while (resv->region_cache_count <
4820db9d74eSMina Almasry 	       (resv->adds_in_progress + regions_needed)) {
4830db9d74eSMina Almasry 		to_allocate = resv->adds_in_progress + regions_needed -
4840db9d74eSMina Almasry 			      resv->region_cache_count;
4850db9d74eSMina Almasry 
4860db9d74eSMina Almasry 		/* At this point, we should have enough entries in the cache
487f0953a1bSIngo Molnar 		 * for all the existing adds_in_progress. We should only be
4880db9d74eSMina Almasry 		 * needing to allocate for regions_needed.
4890db9d74eSMina Almasry 		 */
4900db9d74eSMina Almasry 		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
4910db9d74eSMina Almasry 
4920db9d74eSMina Almasry 		spin_unlock(&resv->lock);
4930db9d74eSMina Almasry 		for (i = 0; i < to_allocate; i++) {
4940db9d74eSMina Almasry 			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
4950db9d74eSMina Almasry 			if (!trg)
4960db9d74eSMina Almasry 				goto out_of_memory;
4970db9d74eSMina Almasry 			list_add(&trg->link, &allocated_regions);
4980db9d74eSMina Almasry 		}
4990db9d74eSMina Almasry 
5000db9d74eSMina Almasry 		spin_lock(&resv->lock);
5010db9d74eSMina Almasry 
502d3ec7b6eSWei Yang 		list_splice(&allocated_regions, &resv->region_cache);
503d3ec7b6eSWei Yang 		resv->region_cache_count += to_allocate;
5040db9d74eSMina Almasry 	}
5050db9d74eSMina Almasry 
5060db9d74eSMina Almasry 	return 0;
5070db9d74eSMina Almasry 
5080db9d74eSMina Almasry out_of_memory:
5090db9d74eSMina Almasry 	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
510d75c6af9SMina Almasry 		list_del(&rg->link);
511d75c6af9SMina Almasry 		kfree(rg);
512d75c6af9SMina Almasry 	}
5130db9d74eSMina Almasry 	return -ENOMEM;
514d75c6af9SMina Almasry }
515d75c6af9SMina Almasry 
5161dd308a7SMike Kravetz /*
5171dd308a7SMike Kravetz  * Add the huge page range represented by [f, t) to the reserve
5180db9d74eSMina Almasry  * map.  Regions will be taken from the cache to fill in this range.
5190db9d74eSMina Almasry  * Sufficient regions should exist in the cache due to the previous
5200db9d74eSMina Almasry  * call to region_chg with the same range, but in some cases the cache will not
5210db9d74eSMina Almasry  * have sufficient entries due to races with other code doing region_add or
5220db9d74eSMina Almasry  * region_del.  The extra needed entries will be allocated.
523cf3ad20bSMike Kravetz  *
5240db9d74eSMina Almasry  * regions_needed is the out value provided by a previous call to region_chg.
5250db9d74eSMina Almasry  *
5260db9d74eSMina Almasry  * Return the number of new huge pages added to the map.  This number is greater
5270db9d74eSMina Almasry  * than or equal to zero.  If file_region entries needed to be allocated for
5287c8de358SEthon Paul  * this operation and the allocation failed, -ENOMEM is returned.
5290db9d74eSMina Almasry  * region_add of regions of length 1 never allocates file_regions and cannot
5300db9d74eSMina Almasry  * fail; region_chg will always allocate at least 1 entry and a region_add for
5310db9d74eSMina Almasry  * 1 page will only require at most 1 entry.
5321dd308a7SMike Kravetz  */
5330db9d74eSMina Almasry static long region_add(struct resv_map *resv, long f, long t,
534075a61d0SMina Almasry 		       long in_regions_needed, struct hstate *h,
535075a61d0SMina Almasry 		       struct hugetlb_cgroup *h_cg)
53696822904SAndy Whitcroft {
5370db9d74eSMina Almasry 	long add = 0, actual_regions_needed = 0;
53896822904SAndy Whitcroft 
5397b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
5400db9d74eSMina Almasry retry:
5410db9d74eSMina Almasry 
5420db9d74eSMina Almasry 	/* Count how many regions are actually needed to execute this add. */
543972a3da3SWei Yang 	add_reservation_in_range(resv, f, t, NULL, NULL,
544972a3da3SWei Yang 				 &actual_regions_needed);
54596822904SAndy Whitcroft 
5465e911373SMike Kravetz 	/*
5470db9d74eSMina Almasry 	 * Check for sufficient descriptors in the cache to accommodate
5480db9d74eSMina Almasry 	 * this add operation. Note that actual_regions_needed may be greater
5490db9d74eSMina Almasry 	 * than in_regions_needed, as the resv_map may have been modified since
5500db9d74eSMina Almasry 	 * the region_chg call. In this case, we need to make sure that we
5510db9d74eSMina Almasry 	 * allocate extra entries, such that we have enough for all the
5520db9d74eSMina Almasry 	 * existing adds_in_progress, plus the excess needed for this
5530db9d74eSMina Almasry 	 * operation.
5545e911373SMike Kravetz 	 */
5550db9d74eSMina Almasry 	if (actual_regions_needed > in_regions_needed &&
5560db9d74eSMina Almasry 	    resv->region_cache_count <
5570db9d74eSMina Almasry 		    resv->adds_in_progress +
5580db9d74eSMina Almasry 			    (actual_regions_needed - in_regions_needed)) {
5590db9d74eSMina Almasry 		/* region_add operation of range 1 should never need to
5600db9d74eSMina Almasry 		 * allocate file_region entries.
5610db9d74eSMina Almasry 		 */
5620db9d74eSMina Almasry 		VM_BUG_ON(t - f <= 1);
5635e911373SMike Kravetz 
5640db9d74eSMina Almasry 		if (allocate_file_region_entries(
5650db9d74eSMina Almasry 			    resv, actual_regions_needed - in_regions_needed)) {
5660db9d74eSMina Almasry 			return -ENOMEM;
5675e911373SMike Kravetz 		}
5685e911373SMike Kravetz 
5690db9d74eSMina Almasry 		goto retry;
5700db9d74eSMina Almasry 	}
571cf3ad20bSMike Kravetz 
572972a3da3SWei Yang 	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
5730db9d74eSMina Almasry 
5740db9d74eSMina Almasry 	resv->adds_in_progress -= in_regions_needed;
5750db9d74eSMina Almasry 
5767b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
577cf3ad20bSMike Kravetz 	return add;
57896822904SAndy Whitcroft }
57996822904SAndy Whitcroft 
5801dd308a7SMike Kravetz /*
5811dd308a7SMike Kravetz  * Examine the existing reserve map and determine how many
5821dd308a7SMike Kravetz  * huge pages in the specified range [f, t) are NOT currently
5831dd308a7SMike Kravetz  * represented.  This routine is called before a subsequent
5841dd308a7SMike Kravetz  * call to region_add that will actually modify the reserve
5851dd308a7SMike Kravetz  * map to add the specified range [f, t).  region_chg does
5861dd308a7SMike Kravetz  * not change the number of huge pages represented by the
5870db9d74eSMina Almasry  * map.  A number of new file_region structures are added to the cache as
5880db9d74eSMina Almasry  * placeholders for the subsequent region_add call to use. At least 1
5890db9d74eSMina Almasry  * file_region structure is added.
5900db9d74eSMina Almasry  *
5910db9d74eSMina Almasry  * out_regions_needed is the number of regions added to the
5920db9d74eSMina Almasry  * resv->adds_in_progress.  This value needs to be provided to a follow up call
5930db9d74eSMina Almasry  * to region_add or region_abort for proper accounting.
5945e911373SMike Kravetz  *
5955e911373SMike Kravetz  * Returns the number of huge pages that need to be added to the existing
5965e911373SMike Kravetz  * reservation map for the range [f, t).  This number is greater than or equal
5975e911373SMike Kravetz  * to zero.  -ENOMEM is returned if a new file_region structure or cache entry
5985e911373SMike Kravetz  * is needed and cannot be allocated.
5991dd308a7SMike Kravetz  */
6000db9d74eSMina Almasry static long region_chg(struct resv_map *resv, long f, long t,
6010db9d74eSMina Almasry 		       long *out_regions_needed)
60296822904SAndy Whitcroft {
60396822904SAndy Whitcroft 	long chg = 0;
60496822904SAndy Whitcroft 
6057b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
6065e911373SMike Kravetz 
607972a3da3SWei Yang 	/* Count how many hugepages in this range are NOT represented. */
608075a61d0SMina Almasry 	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
609972a3da3SWei Yang 				       out_regions_needed);
6105e911373SMike Kravetz 
6110db9d74eSMina Almasry 	if (*out_regions_needed == 0)
6120db9d74eSMina Almasry 		*out_regions_needed = 1;
6135e911373SMike Kravetz 
6140db9d74eSMina Almasry 	if (allocate_file_region_entries(resv, *out_regions_needed))
6155e911373SMike Kravetz 		return -ENOMEM;
6165e911373SMike Kravetz 
6170db9d74eSMina Almasry 	resv->adds_in_progress += *out_regions_needed;
61896822904SAndy Whitcroft 
6197b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
62096822904SAndy Whitcroft 	return chg;
62196822904SAndy Whitcroft }
62296822904SAndy Whitcroft 
6231dd308a7SMike Kravetz /*
6245e911373SMike Kravetz  * Abort the in progress add operation.  The adds_in_progress field
6255e911373SMike Kravetz  * of the resv_map keeps track of the operations in progress between
6265e911373SMike Kravetz  * calls to region_chg and region_add.  Operations are sometimes
6275e911373SMike Kravetz  * aborted after the call to region_chg.  In such cases, region_abort
6280db9d74eSMina Almasry  * is called to decrement the adds_in_progress counter. regions_needed
6290db9d74eSMina Almasry  * is the out_regions_needed value provided by the preceding region_chg
6300db9d74eSMina Almasry  * call; it is used to decrement the adds_in_progress counter.
6315e911373SMike Kravetz  *
6325e911373SMike Kravetz  * NOTE: The range arguments [f, t) are not needed or used in this
6335e911373SMike Kravetz  * routine.  They are kept to make reading the calling code easier as
6345e911373SMike Kravetz  * arguments will match the associated region_chg call.
6355e911373SMike Kravetz  */
6360db9d74eSMina Almasry static void region_abort(struct resv_map *resv, long f, long t,
6370db9d74eSMina Almasry 			 long regions_needed)
6385e911373SMike Kravetz {
6395e911373SMike Kravetz 	spin_lock(&resv->lock);
6405e911373SMike Kravetz 	VM_BUG_ON(!resv->region_cache_count);
6410db9d74eSMina Almasry 	resv->adds_in_progress -= regions_needed;
6425e911373SMike Kravetz 	spin_unlock(&resv->lock);
6435e911373SMike Kravetz }
6445e911373SMike Kravetz 
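/*
 * [Editor's note -- illustrative sketch, not part of the original file.]
 * The helpers above form a two-phase protocol: region_chg() pre-allocates
 * cache entries and bumps adds_in_progress, after which the caller either
 * commits with region_add() or backs out with region_abort().  The
 * hypothetical helper below shows that pairing; real callers (e.g.
 * hugetlb_reserve_pages()) do additional subpool and cgroup accounting
 * between the two steps.
 */
static long __maybe_unused example_reserve_range(struct resv_map *resv,
						 struct hstate *h,
						 long f, long t)
{
	long regions_needed, chg, ret;

	chg = region_chg(resv, f, t, &regions_needed);
	if (chg < 0)		/* could not allocate cache entries */
		return chg;

	ret = hugetlb_acct_memory(h, chg);
	if (ret < 0) {
		/* Drop the adds_in_progress taken on our behalf. */
		region_abort(resv, f, t, regions_needed);
		return ret;
	}

	/* Consume the cached entries; cannot fail for a range of length 1. */
	return region_add(resv, f, t, regions_needed, h, NULL);
}
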
6455e911373SMike Kravetz /*
646feba16e2SMike Kravetz  * Delete the specified range [f, t) from the reserve map.  If the
647feba16e2SMike Kravetz  * t parameter is LONG_MAX, this indicates that ALL regions after f
648feba16e2SMike Kravetz  * should be deleted.  Locate the regions which intersect [f, t)
649feba16e2SMike Kravetz  * and either trim, delete or split the existing regions.
650feba16e2SMike Kravetz  *
651feba16e2SMike Kravetz  * Returns the number of huge pages deleted from the reserve map.
652feba16e2SMike Kravetz  * In the normal case, the return value is zero or more.  In the
653feba16e2SMike Kravetz  * case where a region must be split, a new region descriptor must
654feba16e2SMike Kravetz  * be allocated.  If the allocation fails, -ENOMEM will be returned.
655feba16e2SMike Kravetz  * NOTE: If the parameter t == LONG_MAX, then we will never split
656feba16e2SMike Kravetz  * a region and possibly return -ENOMEM.  Callers specifying
657feba16e2SMike Kravetz  * t == LONG_MAX do not need to check for -ENOMEM error.
6581dd308a7SMike Kravetz  */
659feba16e2SMike Kravetz static long region_del(struct resv_map *resv, long f, long t)
66096822904SAndy Whitcroft {
6611406ec9bSJoonsoo Kim 	struct list_head *head = &resv->regions;
66296822904SAndy Whitcroft 	struct file_region *rg, *trg;
663feba16e2SMike Kravetz 	struct file_region *nrg = NULL;
664feba16e2SMike Kravetz 	long del = 0;
66596822904SAndy Whitcroft 
666feba16e2SMike Kravetz retry:
6677b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
668feba16e2SMike Kravetz 	list_for_each_entry_safe(rg, trg, head, link) {
669dbe409e4SMike Kravetz 		/*
670dbe409e4SMike Kravetz 		 * Skip regions before the range to be deleted.  file_region
671dbe409e4SMike Kravetz 		 * ranges are normally of the form [from, to).  However, there
672dbe409e4SMike Kravetz 		 * may be a "placeholder" entry in the map which is of the form
673dbe409e4SMike Kravetz 		 * (from, to) with from == to.  Check for placeholder entries
674dbe409e4SMike Kravetz 		 * at the beginning of the range to be deleted.
675dbe409e4SMike Kravetz 		 */
676dbe409e4SMike Kravetz 		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
677feba16e2SMike Kravetz 			continue;
678dbe409e4SMike Kravetz 
679feba16e2SMike Kravetz 		if (rg->from >= t)
68096822904SAndy Whitcroft 			break;
68196822904SAndy Whitcroft 
682feba16e2SMike Kravetz 		if (f > rg->from && t < rg->to) { /* Must split region */
683feba16e2SMike Kravetz 			/*
684feba16e2SMike Kravetz 			 * Check for an entry in the cache before dropping
685feba16e2SMike Kravetz 			 * lock and attempting allocation.
686feba16e2SMike Kravetz 			 */
687feba16e2SMike Kravetz 			if (!nrg &&
688feba16e2SMike Kravetz 			    resv->region_cache_count > resv->adds_in_progress) {
689feba16e2SMike Kravetz 				nrg = list_first_entry(&resv->region_cache,
690feba16e2SMike Kravetz 							struct file_region,
691feba16e2SMike Kravetz 							link);
692feba16e2SMike Kravetz 				list_del(&nrg->link);
693feba16e2SMike Kravetz 				resv->region_cache_count--;
69496822904SAndy Whitcroft 			}
69596822904SAndy Whitcroft 
696feba16e2SMike Kravetz 			if (!nrg) {
697feba16e2SMike Kravetz 				spin_unlock(&resv->lock);
698feba16e2SMike Kravetz 				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
699feba16e2SMike Kravetz 				if (!nrg)
700feba16e2SMike Kravetz 					return -ENOMEM;
701feba16e2SMike Kravetz 				goto retry;
702feba16e2SMike Kravetz 			}
703feba16e2SMike Kravetz 
704feba16e2SMike Kravetz 			del += t - f;
70579aa925bSMike Kravetz 			hugetlb_cgroup_uncharge_file_region(
706d85aecf2SMiaohe Lin 				resv, rg, t - f, false);
707feba16e2SMike Kravetz 
708feba16e2SMike Kravetz 			/* New entry for end of split region */
709feba16e2SMike Kravetz 			nrg->from = t;
710feba16e2SMike Kravetz 			nrg->to = rg->to;
711075a61d0SMina Almasry 
712075a61d0SMina Almasry 			copy_hugetlb_cgroup_uncharge_info(nrg, rg);
713075a61d0SMina Almasry 
714feba16e2SMike Kravetz 			INIT_LIST_HEAD(&nrg->link);
715feba16e2SMike Kravetz 
716feba16e2SMike Kravetz 			/* Original entry is trimmed */
717feba16e2SMike Kravetz 			rg->to = f;
718feba16e2SMike Kravetz 
719feba16e2SMike Kravetz 			list_add(&nrg->link, &rg->link);
720feba16e2SMike Kravetz 			nrg = NULL;
72196822904SAndy Whitcroft 			break;
722feba16e2SMike Kravetz 		}
723feba16e2SMike Kravetz 
724feba16e2SMike Kravetz 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
725feba16e2SMike Kravetz 			del += rg->to - rg->from;
726075a61d0SMina Almasry 			hugetlb_cgroup_uncharge_file_region(resv, rg,
727d85aecf2SMiaohe Lin 							    rg->to - rg->from, true);
72896822904SAndy Whitcroft 			list_del(&rg->link);
72996822904SAndy Whitcroft 			kfree(rg);
730feba16e2SMike Kravetz 			continue;
73196822904SAndy Whitcroft 		}
7327b24d861SDavidlohr Bueso 
733feba16e2SMike Kravetz 		if (f <= rg->from) {	/* Trim beginning of region */
734075a61d0SMina Almasry 			hugetlb_cgroup_uncharge_file_region(resv, rg,
735d85aecf2SMiaohe Lin 							    t - rg->from, false);
736075a61d0SMina Almasry 
73779aa925bSMike Kravetz 			del += t - rg->from;
73879aa925bSMike Kravetz 			rg->from = t;
73979aa925bSMike Kravetz 		} else {		/* Trim end of region */
740075a61d0SMina Almasry 			hugetlb_cgroup_uncharge_file_region(resv, rg,
741d85aecf2SMiaohe Lin 							    rg->to - f, false);
74279aa925bSMike Kravetz 
74379aa925bSMike Kravetz 			del += rg->to - f;
74479aa925bSMike Kravetz 			rg->to = f;
745feba16e2SMike Kravetz 		}
746feba16e2SMike Kravetz 	}
747feba16e2SMike Kravetz 
7487b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
749feba16e2SMike Kravetz 	kfree(nrg);
750feba16e2SMike Kravetz 	return del;
75196822904SAndy Whitcroft }
75296822904SAndy Whitcroft 
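/*
 * [Editor's note -- illustrative addition, not part of the original file.]
 * Example of the split case above: with a single region [0, 5) in the map,
 * region_del(resv, 2, 3) trims the original entry to [0, 2), takes a
 * file_region from the cache (or allocates one) for the new tail [3, 5),
 * and returns 1 -- the number of pages removed from the reserve map.
 */
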
7531dd308a7SMike Kravetz /*
754b5cec28dSMike Kravetz  * A rare out of memory error was encountered which prevented removal of
755b5cec28dSMike Kravetz  * the reserve map region for a page.  The huge page itself was freed
756b5cec28dSMike Kravetz  * and removed from the page cache.  This routine will adjust the subpool
757b5cec28dSMike Kravetz  * usage count, and the global reserve count if needed.  By incrementing
758b5cec28dSMike Kravetz  * these counts, the reserve map entry which could not be deleted will
759b5cec28dSMike Kravetz  * appear as a "reserved" entry instead of simply dangling with incorrect
760b5cec28dSMike Kravetz  * counts.
761b5cec28dSMike Kravetz  */
76272e2936cSzhong jiang void hugetlb_fix_reserve_counts(struct inode *inode)
763b5cec28dSMike Kravetz {
764b5cec28dSMike Kravetz 	struct hugepage_subpool *spool = subpool_inode(inode);
765b5cec28dSMike Kravetz 	long rsv_adjust;
766da56388cSMiaohe Lin 	bool reserved = false;
767b5cec28dSMike Kravetz 
768b5cec28dSMike Kravetz 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
769da56388cSMiaohe Lin 	if (rsv_adjust > 0) {
770b5cec28dSMike Kravetz 		struct hstate *h = hstate_inode(inode);
771b5cec28dSMike Kravetz 
772da56388cSMiaohe Lin 		if (!hugetlb_acct_memory(h, 1))
773da56388cSMiaohe Lin 			reserved = true;
774da56388cSMiaohe Lin 	} else if (!rsv_adjust) {
775da56388cSMiaohe Lin 		reserved = true;
776b5cec28dSMike Kravetz 	}
777da56388cSMiaohe Lin 
778da56388cSMiaohe Lin 	if (!reserved)
779da56388cSMiaohe Lin 		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
780b5cec28dSMike Kravetz }
781b5cec28dSMike Kravetz 
782b5cec28dSMike Kravetz /*
7831dd308a7SMike Kravetz  * Count and return the number of huge pages in the reserve map
7841dd308a7SMike Kravetz  * that intersect with the range [f, t).
7851dd308a7SMike Kravetz  */
7861406ec9bSJoonsoo Kim static long region_count(struct resv_map *resv, long f, long t)
78784afd99bSAndy Whitcroft {
7881406ec9bSJoonsoo Kim 	struct list_head *head = &resv->regions;
78984afd99bSAndy Whitcroft 	struct file_region *rg;
79084afd99bSAndy Whitcroft 	long chg = 0;
79184afd99bSAndy Whitcroft 
7927b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
79384afd99bSAndy Whitcroft 	/* Locate each segment we overlap with, and count that overlap. */
79484afd99bSAndy Whitcroft 	list_for_each_entry(rg, head, link) {
795f2135a4aSWang Sheng-Hui 		long seg_from;
796f2135a4aSWang Sheng-Hui 		long seg_to;
79784afd99bSAndy Whitcroft 
79884afd99bSAndy Whitcroft 		if (rg->to <= f)
79984afd99bSAndy Whitcroft 			continue;
80084afd99bSAndy Whitcroft 		if (rg->from >= t)
80184afd99bSAndy Whitcroft 			break;
80284afd99bSAndy Whitcroft 
80384afd99bSAndy Whitcroft 		seg_from = max(rg->from, f);
80484afd99bSAndy Whitcroft 		seg_to = min(rg->to, t);
80584afd99bSAndy Whitcroft 
80684afd99bSAndy Whitcroft 		chg += seg_to - seg_from;
80784afd99bSAndy Whitcroft 	}
8087b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
80984afd99bSAndy Whitcroft 
81084afd99bSAndy Whitcroft 	return chg;
81184afd99bSAndy Whitcroft }
81284afd99bSAndy Whitcroft 
81396822904SAndy Whitcroft /*
814e7c4b0bfSAndy Whitcroft  * Convert the address within this vma to the page offset within
815e7c4b0bfSAndy Whitcroft  * the mapping, in pagecache page units; huge pages here.
816e7c4b0bfSAndy Whitcroft  */
817a5516438SAndi Kleen static pgoff_t vma_hugecache_offset(struct hstate *h,
818a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long address)
819e7c4b0bfSAndy Whitcroft {
820a5516438SAndi Kleen 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
821a5516438SAndi Kleen 			(vma->vm_pgoff >> huge_page_order(h));
822e7c4b0bfSAndy Whitcroft }
823e7c4b0bfSAndy Whitcroft 
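/*
 * [Editor's note -- illustrative addition, not part of the original file.]
 * Worked example for vma_hugecache_offset(), assuming 4KB base pages and a
 * 2MB hstate (huge_page_shift(h) == 21, huge_page_order(h) == 9): a fault
 * 4MB past vm_start in a vma with vm_pgoff == 1024 (a 4MB file offset in
 * base-page units) gives (4MB >> 21) + (1024 >> 9) == 2 + 2 == 4, i.e. the
 * fifth huge page of the backing file.
 */
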
8240fe6e20bSNaoya Horiguchi pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
8250fe6e20bSNaoya Horiguchi 				     unsigned long address)
8260fe6e20bSNaoya Horiguchi {
8270fe6e20bSNaoya Horiguchi 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
8280fe6e20bSNaoya Horiguchi }
829dee41079SDan Williams EXPORT_SYMBOL_GPL(linear_hugepage_index);
8300fe6e20bSNaoya Horiguchi 
83184afd99bSAndy Whitcroft /*
83308fba699SMel Gorman  * Return the size of the pages allocated when backing a VMA. In the majority
83408fba699SMel Gorman  * of cases this will be the same size as used by the page table entries.
83408fba699SMel Gorman  */
83508fba699SMel Gorman unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
83608fba699SMel Gorman {
83705ea8860SDan Williams 	if (vma->vm_ops && vma->vm_ops->pagesize)
83805ea8860SDan Williams 		return vma->vm_ops->pagesize(vma);
83908fba699SMel Gorman 	return PAGE_SIZE;
84008fba699SMel Gorman }
841f340ca0fSJoerg Roedel EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
84208fba699SMel Gorman 
84308fba699SMel Gorman /*
8443340289dSMel Gorman  * Return the page size being used by the MMU to back a VMA. In the majority
8453340289dSMel Gorman  * of cases, the page size used by the kernel matches the MMU size. On
84609135cc5SDan Williams  * architectures where it differs, an architecture-specific 'strong'
84709135cc5SDan Williams  * version of this symbol is required.
8483340289dSMel Gorman  */
84909135cc5SDan Williams __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
8503340289dSMel Gorman {
8513340289dSMel Gorman 	return vma_kernel_pagesize(vma);
8523340289dSMel Gorman }
8533340289dSMel Gorman 
8543340289dSMel Gorman /*
85584afd99bSAndy Whitcroft  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
85684afd99bSAndy Whitcroft  * bits of the reservation map pointer, which are always clear due to
85784afd99bSAndy Whitcroft  * alignment.
85884afd99bSAndy Whitcroft  */
85984afd99bSAndy Whitcroft #define HPAGE_RESV_OWNER    (1UL << 0)
86084afd99bSAndy Whitcroft #define HPAGE_RESV_UNMAPPED (1UL << 1)
86104f2cbe3SMel Gorman #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
86284afd99bSAndy Whitcroft 
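/*
 * [Editor's note -- illustrative addition, not part of the original file.]
 * Because a kmalloc()'d resv_map is at least word aligned, the two low bits
 * of its address are always zero, so a private mapping can keep both pieces
 * of state in one word: set_vma_resv_map() and set_vma_resv_flags() OR the
 * map pointer and the HPAGE_RESV_* flags into vm_private_data, vma_resv_map()
 * masks the flag bits back off with ~HPAGE_RESV_MASK, and is_vma_resv_set()
 * tests an individual flag.
 */
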
863a1e78772SMel Gorman /*
864a1e78772SMel Gorman  * These helpers are used to track how many pages are reserved for
865a1e78772SMel Gorman  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
866a1e78772SMel Gorman  * is guaranteed to have its future faults succeed.
867a1e78772SMel Gorman  *
868a1e78772SMel Gorman  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
869a1e78772SMel Gorman  * the reserve counters are updated with the hugetlb_lock held. It is safe
870a1e78772SMel Gorman  * to reset the VMA at fork() time as it is not in use yet and there is no
871a1e78772SMel Gorman  * chance of the global counters getting corrupted as a result of the values.
87284afd99bSAndy Whitcroft  *
87384afd99bSAndy Whitcroft  * The private mapping reservation is represented in a subtly different
87484afd99bSAndy Whitcroft  * manner to a shared mapping.  A shared mapping has a region map associated
87584afd99bSAndy Whitcroft  * with the underlying file, this region map represents the backing file
87684afd99bSAndy Whitcroft  * pages which have ever had a reservation assigned which this persists even
87784afd99bSAndy Whitcroft  * after the page is instantiated.  A private mapping has a region map
87884afd99bSAndy Whitcroft  * associated with the original mmap which is attached to all VMAs which
87984afd99bSAndy Whitcroft  * reference it, this region map represents those offsets which have consumed
88084afd99bSAndy Whitcroft  * a reservation, i.e. where pages have been instantiated.
881a1e78772SMel Gorman  */
882e7c4b0bfSAndy Whitcroft static unsigned long get_vma_private_data(struct vm_area_struct *vma)
883e7c4b0bfSAndy Whitcroft {
884e7c4b0bfSAndy Whitcroft 	return (unsigned long)vma->vm_private_data;
885e7c4b0bfSAndy Whitcroft }
886e7c4b0bfSAndy Whitcroft 
887e7c4b0bfSAndy Whitcroft static void set_vma_private_data(struct vm_area_struct *vma,
888e7c4b0bfSAndy Whitcroft 							unsigned long value)
889e7c4b0bfSAndy Whitcroft {
890e7c4b0bfSAndy Whitcroft 	vma->vm_private_data = (void *)value;
891e7c4b0bfSAndy Whitcroft }
892e7c4b0bfSAndy Whitcroft 
893e9fe92aeSMina Almasry static void
894e9fe92aeSMina Almasry resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
895e9fe92aeSMina Almasry 					  struct hugetlb_cgroup *h_cg,
896e9fe92aeSMina Almasry 					  struct hstate *h)
897e9fe92aeSMina Almasry {
898e9fe92aeSMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
899e9fe92aeSMina Almasry 	if (!h_cg || !h) {
900e9fe92aeSMina Almasry 		resv_map->reservation_counter = NULL;
901e9fe92aeSMina Almasry 		resv_map->pages_per_hpage = 0;
902e9fe92aeSMina Almasry 		resv_map->css = NULL;
903e9fe92aeSMina Almasry 	} else {
904e9fe92aeSMina Almasry 		resv_map->reservation_counter =
905e9fe92aeSMina Almasry 			&h_cg->rsvd_hugepage[hstate_index(h)];
906e9fe92aeSMina Almasry 		resv_map->pages_per_hpage = pages_per_huge_page(h);
907e9fe92aeSMina Almasry 		resv_map->css = &h_cg->css;
908e9fe92aeSMina Almasry 	}
909e9fe92aeSMina Almasry #endif
910e9fe92aeSMina Almasry }
911e9fe92aeSMina Almasry 
9129119a41eSJoonsoo Kim struct resv_map *resv_map_alloc(void)
91384afd99bSAndy Whitcroft {
91484afd99bSAndy Whitcroft 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
9155e911373SMike Kravetz 	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
9165e911373SMike Kravetz 
9175e911373SMike Kravetz 	if (!resv_map || !rg) {
9185e911373SMike Kravetz 		kfree(resv_map);
9195e911373SMike Kravetz 		kfree(rg);
92084afd99bSAndy Whitcroft 		return NULL;
9215e911373SMike Kravetz 	}
92284afd99bSAndy Whitcroft 
92384afd99bSAndy Whitcroft 	kref_init(&resv_map->refs);
9247b24d861SDavidlohr Bueso 	spin_lock_init(&resv_map->lock);
92584afd99bSAndy Whitcroft 	INIT_LIST_HEAD(&resv_map->regions);
92684afd99bSAndy Whitcroft 
9275e911373SMike Kravetz 	resv_map->adds_in_progress = 0;
928e9fe92aeSMina Almasry 	/*
929e9fe92aeSMina Almasry 	 * Initialize these to 0. On shared mappings, 0's here indicate these
930e9fe92aeSMina Almasry 	 * fields don't do cgroup accounting. On private mappings, these will be
931e9fe92aeSMina Almasry 	 * re-initialized to the proper values, to indicate that hugetlb cgroup
932e9fe92aeSMina Almasry 	 * reservations are to be un-charged from here.
933e9fe92aeSMina Almasry 	 */
934e9fe92aeSMina Almasry 	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
9355e911373SMike Kravetz 
9365e911373SMike Kravetz 	INIT_LIST_HEAD(&resv_map->region_cache);
9375e911373SMike Kravetz 	list_add(&rg->link, &resv_map->region_cache);
9385e911373SMike Kravetz 	resv_map->region_cache_count = 1;
9395e911373SMike Kravetz 
94084afd99bSAndy Whitcroft 	return resv_map;
94184afd99bSAndy Whitcroft }
94284afd99bSAndy Whitcroft 
9439119a41eSJoonsoo Kim void resv_map_release(struct kref *ref)
94484afd99bSAndy Whitcroft {
94584afd99bSAndy Whitcroft 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
9465e911373SMike Kravetz 	struct list_head *head = &resv_map->region_cache;
9475e911373SMike Kravetz 	struct file_region *rg, *trg;
94884afd99bSAndy Whitcroft 
94984afd99bSAndy Whitcroft 	/* Clear out any active regions before we release the map. */
950feba16e2SMike Kravetz 	region_del(resv_map, 0, LONG_MAX);
9515e911373SMike Kravetz 
9525e911373SMike Kravetz 	/* ... and any entries left in the cache */
9535e911373SMike Kravetz 	list_for_each_entry_safe(rg, trg, head, link) {
9545e911373SMike Kravetz 		list_del(&rg->link);
9555e911373SMike Kravetz 		kfree(rg);
9565e911373SMike Kravetz 	}
9575e911373SMike Kravetz 
9585e911373SMike Kravetz 	VM_BUG_ON(resv_map->adds_in_progress);
9595e911373SMike Kravetz 
96084afd99bSAndy Whitcroft 	kfree(resv_map);
96184afd99bSAndy Whitcroft }
96284afd99bSAndy Whitcroft 
9634e35f483SJoonsoo Kim static inline struct resv_map *inode_resv_map(struct inode *inode)
9644e35f483SJoonsoo Kim {
965f27a5136SMike Kravetz 	/*
966f27a5136SMike Kravetz 	 * At inode evict time, i_mapping may not point to the original
967f27a5136SMike Kravetz 	 * address space within the inode.  This original address space
968f27a5136SMike Kravetz 	 * contains the pointer to the resv_map.  So, always use the
969f27a5136SMike Kravetz 	 * address space embedded within the inode.
970f27a5136SMike Kravetz 	 * The VERY common case is inode->mapping == &inode->i_data but,
971f27a5136SMike Kravetz 	 * this may not be true for device special inodes.
972f27a5136SMike Kravetz 	 */
973f27a5136SMike Kravetz 	return (struct resv_map *)(&inode->i_data)->private_data;
9744e35f483SJoonsoo Kim }
9754e35f483SJoonsoo Kim 
97684afd99bSAndy Whitcroft static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
977a1e78772SMel Gorman {
97881d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
9794e35f483SJoonsoo Kim 	if (vma->vm_flags & VM_MAYSHARE) {
9804e35f483SJoonsoo Kim 		struct address_space *mapping = vma->vm_file->f_mapping;
9814e35f483SJoonsoo Kim 		struct inode *inode = mapping->host;
9824e35f483SJoonsoo Kim 
9834e35f483SJoonsoo Kim 		return inode_resv_map(inode);
9844e35f483SJoonsoo Kim 
9854e35f483SJoonsoo Kim 	} else {
98684afd99bSAndy Whitcroft 		return (struct resv_map *)(get_vma_private_data(vma) &
98784afd99bSAndy Whitcroft 							~HPAGE_RESV_MASK);
9884e35f483SJoonsoo Kim 	}
989a1e78772SMel Gorman }
990a1e78772SMel Gorman 
99184afd99bSAndy Whitcroft static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
992a1e78772SMel Gorman {
99381d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
99481d1b09cSSasha Levin 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
995a1e78772SMel Gorman 
99684afd99bSAndy Whitcroft 	set_vma_private_data(vma, (get_vma_private_data(vma) &
99784afd99bSAndy Whitcroft 				HPAGE_RESV_MASK) | (unsigned long)map);
99804f2cbe3SMel Gorman }
99904f2cbe3SMel Gorman 
100004f2cbe3SMel Gorman static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
100104f2cbe3SMel Gorman {
100281d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
100381d1b09cSSasha Levin 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
1004e7c4b0bfSAndy Whitcroft 
1005e7c4b0bfSAndy Whitcroft 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
100604f2cbe3SMel Gorman }
100704f2cbe3SMel Gorman 
100804f2cbe3SMel Gorman static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
100904f2cbe3SMel Gorman {
101081d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1011e7c4b0bfSAndy Whitcroft 
1012e7c4b0bfSAndy Whitcroft 	return (get_vma_private_data(vma) & flag) != 0;
1013a1e78772SMel Gorman }
1014a1e78772SMel Gorman 
101504f2cbe3SMel Gorman /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
1016a1e78772SMel Gorman void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
1017a1e78772SMel Gorman {
101881d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1019f83a275dSMel Gorman 	if (!(vma->vm_flags & VM_MAYSHARE))
1020a1e78772SMel Gorman 		vma->vm_private_data = (void *)0;
1021a1e78772SMel Gorman }
1022a1e78772SMel Gorman 
1023550a7d60SMina Almasry /*
1024550a7d60SMina Almasry  * Reset and decrement one ref on hugepage private reservation.
1025550a7d60SMina Almasry  * Called with mm->mmap_sem writer semaphore held.
1026550a7d60SMina Almasry  * This function should be only used by move_vma() and operate on
1027550a7d60SMina Almasry  * same sized vma. It should never come here with last ref on the
1028550a7d60SMina Almasry  * reservation.
1029550a7d60SMina Almasry  */
1030550a7d60SMina Almasry void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
1031550a7d60SMina Almasry {
1032550a7d60SMina Almasry 	/*
1033550a7d60SMina Almasry 	 * Clear the old hugetlb private page reservation.
1034550a7d60SMina Almasry 	 * It has already been transferred to new_vma.
1035550a7d60SMina Almasry 	 *
1036550a7d60SMina Almasry 	 * During a mremap() operation of a hugetlb vma we call move_vma()
1037550a7d60SMina Almasry 	 * which copies vma into new_vma and unmaps vma. After the copy
1038550a7d60SMina Almasry 	 * operation both new_vma and vma share a reference to the resv_map
1039550a7d60SMina Almasry 	 * struct, and at that point vma is about to be unmapped. We don't
1040550a7d60SMina Almasry 	 * want to return the reservation to the pool at unmap of vma because
1041550a7d60SMina Almasry 	 * the reservation still lives on in new_vma, so simply decrement the
1042550a7d60SMina Almasry 	 * ref here and remove the resv_map reference from this vma.
1043550a7d60SMina Almasry 	 */
1044550a7d60SMina Almasry 	struct resv_map *reservations = vma_resv_map(vma);
1045550a7d60SMina Almasry 
1046afe041c2SBui Quang Minh 	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1047afe041c2SBui Quang Minh 		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
1048550a7d60SMina Almasry 		kref_put(&reservations->refs, resv_map_release);
1049afe041c2SBui Quang Minh 	}
1050550a7d60SMina Almasry 
1051550a7d60SMina Almasry 	reset_vma_resv_huge_pages(vma);
1052550a7d60SMina Almasry }
1053550a7d60SMina Almasry 
1054a1e78772SMel Gorman /* Returns true if the VMA has associated reserve pages */
1055559ec2f8SNicholas Krause static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
1056a1e78772SMel Gorman {
1057af0ed73eSJoonsoo Kim 	if (vma->vm_flags & VM_NORESERVE) {
1058af0ed73eSJoonsoo Kim 		/*
1059af0ed73eSJoonsoo Kim 		 * This address is already reserved by another process (chg == 0),
1060af0ed73eSJoonsoo Kim 		 * so we should decrement the reserved count. Without decrementing,
1061af0ed73eSJoonsoo Kim 		 * the reserve count remains after releasing the inode, because the
1062af0ed73eSJoonsoo Kim 		 * allocated page will go into the page cache and be regarded as
1063af0ed73eSJoonsoo Kim 		 * coming from the reserved pool when the inode is released.
1064af0ed73eSJoonsoo Kim 		 * Currently, we don't have any other way to deal with this
1065af0ed73eSJoonsoo Kim 		 * situation properly, so add a workaround here.
1066af0ed73eSJoonsoo Kim 		 */
1067af0ed73eSJoonsoo Kim 		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
1068559ec2f8SNicholas Krause 			return true;
1069af0ed73eSJoonsoo Kim 		else
1070559ec2f8SNicholas Krause 			return false;
1071af0ed73eSJoonsoo Kim 	}
1072a63884e9SJoonsoo Kim 
1073a63884e9SJoonsoo Kim 	/* Shared mappings always use reserves */
10741fb1b0e9SMike Kravetz 	if (vma->vm_flags & VM_MAYSHARE) {
10751fb1b0e9SMike Kravetz 		/*
10761fb1b0e9SMike Kravetz 		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
10771fb1b0e9SMike Kravetz 		 * be a region map for all pages.  The only situation where
10781fb1b0e9SMike Kravetz 		 * there is no region map is if a hole was punched via
10797c8de358SEthon Paul 		 * fallocate.  In this case, there really are no reserves to
10801fb1b0e9SMike Kravetz 		 * use.  This situation is indicated if chg != 0.
10811fb1b0e9SMike Kravetz 		 */
10821fb1b0e9SMike Kravetz 		if (chg)
10831fb1b0e9SMike Kravetz 			return false;
10841fb1b0e9SMike Kravetz 		else
1085559ec2f8SNicholas Krause 			return true;
10861fb1b0e9SMike Kravetz 	}
1087a63884e9SJoonsoo Kim 
1088a63884e9SJoonsoo Kim 	/*
1089a63884e9SJoonsoo Kim 	 * Only the process that called mmap() has reserves for
1090a63884e9SJoonsoo Kim 	 * private mappings.
1091a63884e9SJoonsoo Kim 	 */
109267961f9dSMike Kravetz 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
109367961f9dSMike Kravetz 		/*
109467961f9dSMike Kravetz 		 * Like the shared case above, a hole punch or truncate
109567961f9dSMike Kravetz 		 * could have been performed on the private mapping.
109667961f9dSMike Kravetz 		 * Examine the value of chg to determine if reserves
109767961f9dSMike Kravetz 		 * actually exist or were previously consumed.
109867961f9dSMike Kravetz 		 * Very Subtle - The value of chg comes from a previous
109967961f9dSMike Kravetz 		 * call to vma_needs_reserves().  The reserve map for
110067961f9dSMike Kravetz 		 * private mappings has different (opposite) semantics
110167961f9dSMike Kravetz 		 * than that of shared mappings.  vma_needs_reserves()
110267961f9dSMike Kravetz 		 * has already taken this difference in semantics into
110367961f9dSMike Kravetz 		 * account.  Therefore, the meaning of chg is the same
110467961f9dSMike Kravetz 		 * as in the shared case above.  Code could easily be
110567961f9dSMike Kravetz 		 * combined, but keeping it separate draws attention to
110667961f9dSMike Kravetz 		 * subtle differences.
110767961f9dSMike Kravetz 		 */
110867961f9dSMike Kravetz 		if (chg)
110967961f9dSMike Kravetz 			return false;
111067961f9dSMike Kravetz 		else
1111559ec2f8SNicholas Krause 			return true;
111267961f9dSMike Kravetz 	}
1113a63884e9SJoonsoo Kim 
1114559ec2f8SNicholas Krause 	return false;
1115a1e78772SMel Gorman }
1116a1e78772SMel Gorman 
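/*
 * Return a free huge page to its node's free list and bump the global
 * and per-node free page counts.  The page must have a zero refcount.
 * Caller must hold hugetlb_lock.
 */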
1117a5516438SAndi Kleen static void enqueue_huge_page(struct hstate *h, struct page *page)
11181da177e4SLinus Torvalds {
11191da177e4SLinus Torvalds 	int nid = page_to_nid(page);
11209487ca60SMike Kravetz 
11219487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
1122b65a4edaSMike Kravetz 	VM_BUG_ON_PAGE(page_count(page), page);
1123b65a4edaSMike Kravetz 
11240edaecfaSAneesh Kumar K.V 	list_move(&page->lru, &h->hugepage_freelists[nid]);
1125a5516438SAndi Kleen 	h->free_huge_pages++;
1126a5516438SAndi Kleen 	h->free_huge_pages_node[nid]++;
11276c037149SMike Kravetz 	SetHPageFreed(page);
11281da177e4SLinus Torvalds }
11291da177e4SLinus Torvalds 
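/*
 * Take a free huge page off the given node's free list, skipping
 * hwpoisoned pages and, when the caller is in a PF_MEMALLOC_PIN
 * context, pages that are not pinnable.  Returns the page with a
 * reference taken, or NULL if the node has no usable free huge page.
 * Caller must hold hugetlb_lock.
 */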
113094310cbcSAnshuman Khandual static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
1131bf50bab2SNaoya Horiguchi {
1132bf50bab2SNaoya Horiguchi 	struct page *page;
11331a08ae36SPavel Tatashin 	bool pin = !!(current->flags & PF_MEMALLOC_PIN);
1134bf50bab2SNaoya Horiguchi 
11359487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
1136bbe88753SJoonsoo Kim 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
11378e3560d9SPavel Tatashin 		if (pin && !is_pinnable_page(page))
1138bbe88753SJoonsoo Kim 			continue;
1139bbe88753SJoonsoo Kim 
11406664bfc8SWei Yang 		if (PageHWPoison(page))
11416664bfc8SWei Yang 			continue;
1142bbe88753SJoonsoo Kim 
11430edaecfaSAneesh Kumar K.V 		list_move(&page->lru, &h->hugepage_activelist);
1144a9869b83SNaoya Horiguchi 		set_page_refcounted(page);
11456c037149SMike Kravetz 		ClearHPageFreed(page);
1146bf50bab2SNaoya Horiguchi 		h->free_huge_pages--;
1147bf50bab2SNaoya Horiguchi 		h->free_huge_pages_node[nid]--;
1148bf50bab2SNaoya Horiguchi 		return page;
1149bf50bab2SNaoya Horiguchi 	}
1150bf50bab2SNaoya Horiguchi 
11516664bfc8SWei Yang 	return NULL;
11526664bfc8SWei Yang }
11536664bfc8SWei Yang 
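/*
 * Walk the zonelist for @nid/@nmask under the cpuset mems cookie and
 * dequeue a free huge page from the first allowed node that has one.
 * If nothing is found and the cpuset's allowed mems changed meanwhile,
 * the walk is retried.
 */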
11543e59fcb0SMichal Hocko static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
11553e59fcb0SMichal Hocko 		nodemask_t *nmask)
115694310cbcSAnshuman Khandual {
11573e59fcb0SMichal Hocko 	unsigned int cpuset_mems_cookie;
11583e59fcb0SMichal Hocko 	struct zonelist *zonelist;
11593e59fcb0SMichal Hocko 	struct zone *zone;
11603e59fcb0SMichal Hocko 	struct zoneref *z;
116198fa15f3SAnshuman Khandual 	int node = NUMA_NO_NODE;
11623e59fcb0SMichal Hocko 
11633e59fcb0SMichal Hocko 	zonelist = node_zonelist(nid, gfp_mask);
11643e59fcb0SMichal Hocko 
11653e59fcb0SMichal Hocko retry_cpuset:
11663e59fcb0SMichal Hocko 	cpuset_mems_cookie = read_mems_allowed_begin();
11673e59fcb0SMichal Hocko 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
116894310cbcSAnshuman Khandual 		struct page *page;
116994310cbcSAnshuman Khandual 
11703e59fcb0SMichal Hocko 		if (!cpuset_zone_allowed(zone, gfp_mask))
11713e59fcb0SMichal Hocko 			continue;
11723e59fcb0SMichal Hocko 		/*
11733e59fcb0SMichal Hocko 		 * no need to ask again on the same node. Pool is node rather than
11743e59fcb0SMichal Hocko 		 * zone aware
11753e59fcb0SMichal Hocko 		 */
11763e59fcb0SMichal Hocko 		if (zone_to_nid(zone) == node)
11773e59fcb0SMichal Hocko 			continue;
11783e59fcb0SMichal Hocko 		node = zone_to_nid(zone);
117994310cbcSAnshuman Khandual 
118094310cbcSAnshuman Khandual 		page = dequeue_huge_page_node_exact(h, node);
118194310cbcSAnshuman Khandual 		if (page)
118294310cbcSAnshuman Khandual 			return page;
118394310cbcSAnshuman Khandual 	}
11843e59fcb0SMichal Hocko 	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
11853e59fcb0SMichal Hocko 		goto retry_cpuset;
11863e59fcb0SMichal Hocko 
118794310cbcSAnshuman Khandual 	return NULL;
118894310cbcSAnshuman Khandual }
118994310cbcSAnshuman Khandual 
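/*
 * Dequeue a free huge page for a fault at @address in @vma, honouring
 * the vma's mempolicy and its reservation state.  Fails if the vma has
 * no reserves to use (or must avoid them) and every free page in the
 * pool is already reserved.
 */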
1190a5516438SAndi Kleen static struct page *dequeue_huge_page_vma(struct hstate *h,
1191a5516438SAndi Kleen 				struct vm_area_struct *vma,
1192af0ed73eSJoonsoo Kim 				unsigned long address, int avoid_reserve,
1193af0ed73eSJoonsoo Kim 				long chg)
11941da177e4SLinus Torvalds {
1195cfcaa66fSBen Widawsky 	struct page *page = NULL;
1196480eccf9SLee Schermerhorn 	struct mempolicy *mpol;
119704ec6264SVlastimil Babka 	gfp_t gfp_mask;
11983e59fcb0SMichal Hocko 	nodemask_t *nodemask;
119904ec6264SVlastimil Babka 	int nid;
12001da177e4SLinus Torvalds 
1201a1e78772SMel Gorman 	/*
1202a1e78772SMel Gorman 	 * A child process with MAP_PRIVATE mappings created by its parent
1203a1e78772SMel Gorman 	 * has no page reserves. This check ensures that reservations are
1204a1e78772SMel Gorman 	 * not "stolen". The child may still get SIGKILLed.
1205a1e78772SMel Gorman 	 */
1206af0ed73eSJoonsoo Kim 	if (!vma_has_reserves(vma, chg) &&
1207a5516438SAndi Kleen 			h->free_huge_pages - h->resv_huge_pages == 0)
1208c0ff7453SMiao Xie 		goto err;
1209a1e78772SMel Gorman 
121004f2cbe3SMel Gorman 	/* If reserves cannot be used, ensure enough pages are in the pool */
1211a5516438SAndi Kleen 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
12126eab04a8SJustin P. Mattock 		goto err;
121304f2cbe3SMel Gorman 
121404ec6264SVlastimil Babka 	gfp_mask = htlb_alloc_mask(h);
121504ec6264SVlastimil Babka 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1216cfcaa66fSBen Widawsky 
1217cfcaa66fSBen Widawsky 	if (mpol_is_preferred_many(mpol)) {
12183e59fcb0SMichal Hocko 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1219cfcaa66fSBen Widawsky 
1220cfcaa66fSBen Widawsky 		/* Fallback to all nodes if page==NULL */
1221cfcaa66fSBen Widawsky 		nodemask = NULL;
1222cfcaa66fSBen Widawsky 	}
1223cfcaa66fSBen Widawsky 
1224cfcaa66fSBen Widawsky 	if (!page)
1225cfcaa66fSBen Widawsky 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1226cfcaa66fSBen Widawsky 
12273e59fcb0SMichal Hocko 	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
1228d6995da3SMike Kravetz 		SetHPageRestoreReserve(page);
1229a63884e9SJoonsoo Kim 		h->resv_huge_pages--;
1230bf50bab2SNaoya Horiguchi 	}
1231cc9a6c87SMel Gorman 
1232cc9a6c87SMel Gorman 	mpol_cond_put(mpol);
1233cc9a6c87SMel Gorman 	return page;
1234cc9a6c87SMel Gorman 
1235c0ff7453SMiao Xie err:
1236cc9a6c87SMel Gorman 	return NULL;
12371da177e4SLinus Torvalds }
12381da177e4SLinus Torvalds 
12391cac6f2cSLuiz Capitulino /*
12401cac6f2cSLuiz Capitulino  * common helper functions for hstate_next_node_to_{alloc|free}.
12411cac6f2cSLuiz Capitulino  * We may have allocated or freed a huge page based on a different
12421cac6f2cSLuiz Capitulino  * nodes_allowed previously, so h->next_nid_to_{alloc|free} might
12431cac6f2cSLuiz Capitulino  * be outside of *nodes_allowed.  Ensure that we use an allowed
12441cac6f2cSLuiz Capitulino  * node for alloc or free.
12451cac6f2cSLuiz Capitulino  */
12461cac6f2cSLuiz Capitulino static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
12471cac6f2cSLuiz Capitulino {
12480edaf86cSAndrew Morton 	nid = next_node_in(nid, *nodes_allowed);
12491cac6f2cSLuiz Capitulino 	VM_BUG_ON(nid >= MAX_NUMNODES);
12501cac6f2cSLuiz Capitulino 
12511cac6f2cSLuiz Capitulino 	return nid;
12521cac6f2cSLuiz Capitulino }
12531cac6f2cSLuiz Capitulino 
12541cac6f2cSLuiz Capitulino static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
12551cac6f2cSLuiz Capitulino {
12561cac6f2cSLuiz Capitulino 	if (!node_isset(nid, *nodes_allowed))
12571cac6f2cSLuiz Capitulino 		nid = next_node_allowed(nid, nodes_allowed);
12581cac6f2cSLuiz Capitulino 	return nid;
12591cac6f2cSLuiz Capitulino }
12601cac6f2cSLuiz Capitulino 
12611cac6f2cSLuiz Capitulino /*
12621cac6f2cSLuiz Capitulino  * returns the previously saved node ["this node"] from which to
12631cac6f2cSLuiz Capitulino  * allocate a persistent huge page for the pool and advance the
12641cac6f2cSLuiz Capitulino  * next node from which to allocate, handling wrap at end of node
12651cac6f2cSLuiz Capitulino  * mask.
12661cac6f2cSLuiz Capitulino  */
12671cac6f2cSLuiz Capitulino static int hstate_next_node_to_alloc(struct hstate *h,
12681cac6f2cSLuiz Capitulino 					nodemask_t *nodes_allowed)
12691cac6f2cSLuiz Capitulino {
12701cac6f2cSLuiz Capitulino 	int nid;
12711cac6f2cSLuiz Capitulino 
12721cac6f2cSLuiz Capitulino 	VM_BUG_ON(!nodes_allowed);
12731cac6f2cSLuiz Capitulino 
12741cac6f2cSLuiz Capitulino 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
12751cac6f2cSLuiz Capitulino 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
12761cac6f2cSLuiz Capitulino 
12771cac6f2cSLuiz Capitulino 	return nid;
12781cac6f2cSLuiz Capitulino }
12791cac6f2cSLuiz Capitulino 
12801cac6f2cSLuiz Capitulino /*
128110c6ec49SMike Kravetz  * helper for remove_pool_huge_page() - return the previously saved
12821cac6f2cSLuiz Capitulino  * node ["this node"] from which to free a huge page.  Advance the
12831cac6f2cSLuiz Capitulino  * next node id whether or not we find a free huge page to free so
12841cac6f2cSLuiz Capitulino  * that the next attempt to free addresses the next node.
12851cac6f2cSLuiz Capitulino  */
12861cac6f2cSLuiz Capitulino static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
12871cac6f2cSLuiz Capitulino {
12881cac6f2cSLuiz Capitulino 	int nid;
12891cac6f2cSLuiz Capitulino 
12901cac6f2cSLuiz Capitulino 	VM_BUG_ON(!nodes_allowed);
12911cac6f2cSLuiz Capitulino 
12921cac6f2cSLuiz Capitulino 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
12931cac6f2cSLuiz Capitulino 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
12941cac6f2cSLuiz Capitulino 
12951cac6f2cSLuiz Capitulino 	return nid;
12961cac6f2cSLuiz Capitulino }
12971cac6f2cSLuiz Capitulino 
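/*
 * Iterate over each allowed node at most once, picking up the next
 * round-robin node to allocate from (or free from) on every pass.
 */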
12981cac6f2cSLuiz Capitulino #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
12991cac6f2cSLuiz Capitulino 	for (nr_nodes = nodes_weight(*mask);				\
13001cac6f2cSLuiz Capitulino 		nr_nodes > 0 &&						\
13011cac6f2cSLuiz Capitulino 		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
13021cac6f2cSLuiz Capitulino 		nr_nodes--)
13031cac6f2cSLuiz Capitulino 
13041cac6f2cSLuiz Capitulino #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
13051cac6f2cSLuiz Capitulino 	for (nr_nodes = nodes_weight(*mask);				\
13061cac6f2cSLuiz Capitulino 		nr_nodes > 0 &&						\
13071cac6f2cSLuiz Capitulino 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
13081cac6f2cSLuiz Capitulino 		nr_nodes--)
13091cac6f2cSLuiz Capitulino 
13108531fc6fSMike Kravetz /* also used to demote non-gigantic huge pages */
131134d9e35bSMike Kravetz static void __destroy_compound_gigantic_page(struct page *page,
131234d9e35bSMike Kravetz 					unsigned int order, bool demote)
1313944d9fecSLuiz Capitulino {
1314944d9fecSLuiz Capitulino 	int i;
1315944d9fecSLuiz Capitulino 	int nr_pages = 1 << order;
1316944d9fecSLuiz Capitulino 	struct page *p = page + 1;
1317944d9fecSLuiz Capitulino 
1318c8cc708aSGerald Schaefer 	atomic_set(compound_mapcount_ptr(page), 0);
131947e29d32SJohn Hubbard 	atomic_set(compound_pincount_ptr(page), 0);
132047e29d32SJohn Hubbard 
1321944d9fecSLuiz Capitulino 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1322a01f4390SMike Kravetz 		p->mapping = NULL;
13231d798ca3SKirill A. Shutemov 		clear_compound_head(p);
132434d9e35bSMike Kravetz 		if (!demote)
1325944d9fecSLuiz Capitulino 			set_page_refcounted(p);
1326944d9fecSLuiz Capitulino 	}
1327944d9fecSLuiz Capitulino 
1328944d9fecSLuiz Capitulino 	set_compound_order(page, 0);
13295232c63fSMatthew Wilcox (Oracle) #ifdef CONFIG_64BIT
1330ba9c1201SGerald Schaefer 	page[1].compound_nr = 0;
13315232c63fSMatthew Wilcox (Oracle) #endif
1332944d9fecSLuiz Capitulino 	__ClearPageHead(page);
1333944d9fecSLuiz Capitulino }
1334944d9fecSLuiz Capitulino 
13358531fc6fSMike Kravetz static void destroy_compound_hugetlb_page_for_demote(struct page *page,
13368531fc6fSMike Kravetz 					unsigned int order)
13378531fc6fSMike Kravetz {
13388531fc6fSMike Kravetz 	__destroy_compound_gigantic_page(page, order, true);
13398531fc6fSMike Kravetz }
13408531fc6fSMike Kravetz 
13418531fc6fSMike Kravetz #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
134234d9e35bSMike Kravetz static void destroy_compound_gigantic_page(struct page *page,
134334d9e35bSMike Kravetz 					unsigned int order)
134434d9e35bSMike Kravetz {
134534d9e35bSMike Kravetz 	__destroy_compound_gigantic_page(page, order, false);
134634d9e35bSMike Kravetz }
134734d9e35bSMike Kravetz 
1348d00181b9SKirill A. Shutemov static void free_gigantic_page(struct page *page, unsigned int order)
1349944d9fecSLuiz Capitulino {
1350cf11e85fSRoman Gushchin 	/*
1351cf11e85fSRoman Gushchin 	 * If the page isn't allocated using the cma allocator,
1352cf11e85fSRoman Gushchin 	 * cma_release() returns false.
1353cf11e85fSRoman Gushchin 	 */
1354dbda8feaSBarry Song #ifdef CONFIG_CMA
1355dbda8feaSBarry Song 	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
1356cf11e85fSRoman Gushchin 		return;
1357dbda8feaSBarry Song #endif
1358cf11e85fSRoman Gushchin 
1359944d9fecSLuiz Capitulino 	free_contig_range(page_to_pfn(page), 1 << order);
1360944d9fecSLuiz Capitulino }
1361944d9fecSLuiz Capitulino 
13624eb0716eSAlexandre Ghiti #ifdef CONFIG_CONTIG_ALLOC
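/*
 * Allocate a gigantic page: when CONFIG_CMA is enabled, try the CMA
 * area of the target node first, then the CMA areas of the other
 * allowed nodes; otherwise (or if CMA fails) fall back to
 * alloc_contig_pages().
 */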
1363d9cc948fSMichal Hocko static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1364d9cc948fSMichal Hocko 		int nid, nodemask_t *nodemask)
1365944d9fecSLuiz Capitulino {
136604adbc3fSMiaohe Lin 	unsigned long nr_pages = pages_per_huge_page(h);
1367953f064aSLi Xinhai 	if (nid == NUMA_NO_NODE)
1368953f064aSLi Xinhai 		nid = numa_mem_id();
1369944d9fecSLuiz Capitulino 
1370dbda8feaSBarry Song #ifdef CONFIG_CMA
1371dbda8feaSBarry Song 	{
1372cf11e85fSRoman Gushchin 		struct page *page;
1373cf11e85fSRoman Gushchin 		int node;
1374cf11e85fSRoman Gushchin 
1375953f064aSLi Xinhai 		if (hugetlb_cma[nid]) {
1376953f064aSLi Xinhai 			page = cma_alloc(hugetlb_cma[nid], nr_pages,
1377953f064aSLi Xinhai 					huge_page_order(h), true);
1378953f064aSLi Xinhai 			if (page)
1379953f064aSLi Xinhai 				return page;
1380953f064aSLi Xinhai 		}
1381953f064aSLi Xinhai 
1382953f064aSLi Xinhai 		if (!(gfp_mask & __GFP_THISNODE)) {
1383cf11e85fSRoman Gushchin 			for_each_node_mask(node, *nodemask) {
1384953f064aSLi Xinhai 				if (node == nid || !hugetlb_cma[node])
1385cf11e85fSRoman Gushchin 					continue;
1386cf11e85fSRoman Gushchin 
1387cf11e85fSRoman Gushchin 				page = cma_alloc(hugetlb_cma[node], nr_pages,
1388cf11e85fSRoman Gushchin 						huge_page_order(h), true);
1389cf11e85fSRoman Gushchin 				if (page)
1390cf11e85fSRoman Gushchin 					return page;
1391cf11e85fSRoman Gushchin 			}
1392cf11e85fSRoman Gushchin 		}
1393953f064aSLi Xinhai 	}
1394dbda8feaSBarry Song #endif
1395cf11e85fSRoman Gushchin 
13965e27a2dfSAnshuman Khandual 	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
1397944d9fecSLuiz Capitulino }
1398944d9fecSLuiz Capitulino 
13994eb0716eSAlexandre Ghiti #else /* !CONFIG_CONTIG_ALLOC */
14004eb0716eSAlexandre Ghiti static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
14014eb0716eSAlexandre Ghiti 					int nid, nodemask_t *nodemask)
14024eb0716eSAlexandre Ghiti {
14034eb0716eSAlexandre Ghiti 	return NULL;
14044eb0716eSAlexandre Ghiti }
14054eb0716eSAlexandre Ghiti #endif /* CONFIG_CONTIG_ALLOC */
1406944d9fecSLuiz Capitulino 
1407e1073d1eSAneesh Kumar K.V #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1408d9cc948fSMichal Hocko static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
14094eb0716eSAlexandre Ghiti 					int nid, nodemask_t *nodemask)
14104eb0716eSAlexandre Ghiti {
14114eb0716eSAlexandre Ghiti 	return NULL;
14124eb0716eSAlexandre Ghiti }
1413d00181b9SKirill A. Shutemov static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1414944d9fecSLuiz Capitulino static inline void destroy_compound_gigantic_page(struct page *page,
1415d00181b9SKirill A. Shutemov 						unsigned int order) { }
1416944d9fecSLuiz Capitulino #endif
1417944d9fecSLuiz Capitulino 
14186eb4e88aSMike Kravetz /*
14196eb4e88aSMike Kravetz  * Remove hugetlb page from lists, and update dtor so that page appears
142034d9e35bSMike Kravetz  * as just a compound page.
142134d9e35bSMike Kravetz  *
142234d9e35bSMike Kravetz  * A reference is held on the page, except in the case of demote.
14236eb4e88aSMike Kravetz  *
14246eb4e88aSMike Kravetz  * Must be called with hugetlb lock held.
14256eb4e88aSMike Kravetz  */
142634d9e35bSMike Kravetz static void __remove_hugetlb_page(struct hstate *h, struct page *page,
142734d9e35bSMike Kravetz 							bool adjust_surplus,
142834d9e35bSMike Kravetz 							bool demote)
14296eb4e88aSMike Kravetz {
14306eb4e88aSMike Kravetz 	int nid = page_to_nid(page);
14316eb4e88aSMike Kravetz 
14326eb4e88aSMike Kravetz 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
14336eb4e88aSMike Kravetz 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
14346eb4e88aSMike Kravetz 
14359487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
14366eb4e88aSMike Kravetz 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
14376eb4e88aSMike Kravetz 		return;
14386eb4e88aSMike Kravetz 
14396eb4e88aSMike Kravetz 	list_del(&page->lru);
14406eb4e88aSMike Kravetz 
14416eb4e88aSMike Kravetz 	if (HPageFreed(page)) {
14426eb4e88aSMike Kravetz 		h->free_huge_pages--;
14436eb4e88aSMike Kravetz 		h->free_huge_pages_node[nid]--;
14446eb4e88aSMike Kravetz 	}
14456eb4e88aSMike Kravetz 	if (adjust_surplus) {
14466eb4e88aSMike Kravetz 		h->surplus_huge_pages--;
14476eb4e88aSMike Kravetz 		h->surplus_huge_pages_node[nid]--;
14486eb4e88aSMike Kravetz 	}
14496eb4e88aSMike Kravetz 
1450e32d20c0SMike Kravetz 	/*
1451e32d20c0SMike Kravetz 	 * Very subtle
1452e32d20c0SMike Kravetz 	 *
1453e32d20c0SMike Kravetz 	 * For non-gigantic pages set the destructor to the normal compound
1454e32d20c0SMike Kravetz 	 * page dtor.  This is needed in case someone takes an additional
1455e32d20c0SMike Kravetz 	 * temporary ref to the page, and freeing is delayed until they drop
1456e32d20c0SMike Kravetz 	 * their reference.
1457e32d20c0SMike Kravetz 	 *
1458e32d20c0SMike Kravetz 	 * For gigantic pages set the destructor to the null dtor.  This
1459e32d20c0SMike Kravetz 	 * destructor will never be called.  Before freeing the gigantic
1460e32d20c0SMike Kravetz 	 * page destroy_compound_gigantic_page will turn the compound page
1461e32d20c0SMike Kravetz 	 * into a simple group of pages.  After this the destructor does not
1462e32d20c0SMike Kravetz 	 * apply.
1463e32d20c0SMike Kravetz 	 *
1464e32d20c0SMike Kravetz 	 * This handles the case where more than one ref is held when and
1465e32d20c0SMike Kravetz 	 * after update_and_free_page is called.
146634d9e35bSMike Kravetz 	 *
146734d9e35bSMike Kravetz 	 * In the case of demote we do not ref count the page as it will soon
146834d9e35bSMike Kravetz 	 * be turned into a page of smaller size.
1469e32d20c0SMike Kravetz 	 */
147034d9e35bSMike Kravetz 	if (!demote)
14716eb4e88aSMike Kravetz 		set_page_refcounted(page);
1472e32d20c0SMike Kravetz 	if (hstate_is_gigantic(h))
14736eb4e88aSMike Kravetz 		set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1474e32d20c0SMike Kravetz 	else
1475e32d20c0SMike Kravetz 		set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
14766eb4e88aSMike Kravetz 
14776eb4e88aSMike Kravetz 	h->nr_huge_pages--;
14786eb4e88aSMike Kravetz 	h->nr_huge_pages_node[nid]--;
14796eb4e88aSMike Kravetz }
14806eb4e88aSMike Kravetz 
148134d9e35bSMike Kravetz static void remove_hugetlb_page(struct hstate *h, struct page *page,
148234d9e35bSMike Kravetz 							bool adjust_surplus)
148334d9e35bSMike Kravetz {
148434d9e35bSMike Kravetz 	__remove_hugetlb_page(h, page, adjust_surplus, false);
148534d9e35bSMike Kravetz }
148634d9e35bSMike Kravetz 
14878531fc6fSMike Kravetz static void remove_hugetlb_page_for_demote(struct hstate *h, struct page *page,
14888531fc6fSMike Kravetz 							bool adjust_surplus)
14898531fc6fSMike Kravetz {
14908531fc6fSMike Kravetz 	__remove_hugetlb_page(h, page, adjust_surplus, true);
14918531fc6fSMike Kravetz }
14928531fc6fSMike Kravetz 
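/*
 * Add a huge page whose vmemmap is still optimized back into the pool,
 * used when the vmemmap pages needed to fully free it could not be
 * allocated.  Optionally account it as a surplus page.  Caller must
 * hold hugetlb_lock.
 */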
1493ad2fa371SMuchun Song static void add_hugetlb_page(struct hstate *h, struct page *page,
1494ad2fa371SMuchun Song 			     bool adjust_surplus)
1495ad2fa371SMuchun Song {
1496ad2fa371SMuchun Song 	int zeroed;
1497ad2fa371SMuchun Song 	int nid = page_to_nid(page);
1498ad2fa371SMuchun Song 
1499ad2fa371SMuchun Song 	VM_BUG_ON_PAGE(!HPageVmemmapOptimized(page), page);
1500ad2fa371SMuchun Song 
1501ad2fa371SMuchun Song 	lockdep_assert_held(&hugetlb_lock);
1502ad2fa371SMuchun Song 
1503ad2fa371SMuchun Song 	INIT_LIST_HEAD(&page->lru);
1504ad2fa371SMuchun Song 	h->nr_huge_pages++;
1505ad2fa371SMuchun Song 	h->nr_huge_pages_node[nid]++;
1506ad2fa371SMuchun Song 
1507ad2fa371SMuchun Song 	if (adjust_surplus) {
1508ad2fa371SMuchun Song 		h->surplus_huge_pages++;
1509ad2fa371SMuchun Song 		h->surplus_huge_pages_node[nid]++;
1510ad2fa371SMuchun Song 	}
1511ad2fa371SMuchun Song 
1512ad2fa371SMuchun Song 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1513ad2fa371SMuchun Song 	set_page_private(page, 0);
1514ad2fa371SMuchun Song 	SetHPageVmemmapOptimized(page);
1515ad2fa371SMuchun Song 
1516ad2fa371SMuchun Song 	/*
1517b65a4edaSMike Kravetz 	 * This page is about to be managed by the hugetlb allocator and
1518b65a4edaSMike Kravetz 	 * should have no users.  Drop our reference, and check for others
1519b65a4edaSMike Kravetz 	 * just in case.
1520ad2fa371SMuchun Song 	 */
1521ad2fa371SMuchun Song 	zeroed = put_page_testzero(page);
1522b65a4edaSMike Kravetz 	if (!zeroed)
1523b65a4edaSMike Kravetz 		/*
1524b65a4edaSMike Kravetz 		 * It is VERY unlikely someone else has taken a ref on
1525b65a4edaSMike Kravetz 		 * the page.  In this case, we simply return as the
1526b65a4edaSMike Kravetz 		 * hugetlb destructor (free_huge_page) will be called
1527b65a4edaSMike Kravetz 		 * when this other ref is dropped.
1528b65a4edaSMike Kravetz 		 */
1529b65a4edaSMike Kravetz 		return;
1530b65a4edaSMike Kravetz 
1531ad2fa371SMuchun Song 	arch_clear_hugepage_flags(page);
1532ad2fa371SMuchun Song 	enqueue_huge_page(h, page);
1533ad2fa371SMuchun Song }
1534ad2fa371SMuchun Song 
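/*
 * Hand a huge page back to the low-level allocator (buddy, CMA or
 * contig range).  If the page's vmemmap was optimized away and cannot
 * be reallocated, the page is put back into the pool as a surplus page
 * instead of being freed.
 */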
1535b65d4adbSMuchun Song static void __update_and_free_page(struct hstate *h, struct page *page)
15366af2acb6SAdam Litke {
15376af2acb6SAdam Litke 	int i;
1538dbfee5aeSMike Kravetz 	struct page *subpage = page;
1539a5516438SAndi Kleen 
15404eb0716eSAlexandre Ghiti 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1541944d9fecSLuiz Capitulino 		return;
154218229df5SAndy Whitcroft 
15435981611dSMuchun Song 	if (hugetlb_vmemmap_alloc(h, page)) {
1544ad2fa371SMuchun Song 		spin_lock_irq(&hugetlb_lock);
1545ad2fa371SMuchun Song 		/*
1546ad2fa371SMuchun Song 		 * If we cannot allocate vmemmap pages, just refuse to free the
1547ad2fa371SMuchun Song 		 * page and put the page back on the hugetlb free list and treat
1548ad2fa371SMuchun Song 		 * as a surplus page.
1549ad2fa371SMuchun Song 		 */
1550ad2fa371SMuchun Song 		add_hugetlb_page(h, page, true);
1551ad2fa371SMuchun Song 		spin_unlock_irq(&hugetlb_lock);
1552ad2fa371SMuchun Song 		return;
1553ad2fa371SMuchun Song 	}
1554ad2fa371SMuchun Song 
1555dbfee5aeSMike Kravetz 	for (i = 0; i < pages_per_huge_page(h);
1556dbfee5aeSMike Kravetz 	     i++, subpage = mem_map_next(subpage, page, i)) {
1557dbfee5aeSMike Kravetz 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
155832f84528SChris Forbes 				1 << PG_referenced | 1 << PG_dirty |
1559a7407a27SLuiz Capitulino 				1 << PG_active | 1 << PG_private |
1560a7407a27SLuiz Capitulino 				1 << PG_writeback);
15616af2acb6SAdam Litke 	}
1562a01f4390SMike Kravetz 
1563a01f4390SMike Kravetz 	/*
1564a01f4390SMike Kravetz 	 * Non-gigantic pages demoted from CMA allocated gigantic pages
1565a01f4390SMike Kravetz 	 * need to be given back to CMA in free_gigantic_page.
1566a01f4390SMike Kravetz 	 */
1567a01f4390SMike Kravetz 	if (hstate_is_gigantic(h) ||
1568a01f4390SMike Kravetz 	    hugetlb_cma_page(page, huge_page_order(h))) {
1569944d9fecSLuiz Capitulino 		destroy_compound_gigantic_page(page, huge_page_order(h));
1570944d9fecSLuiz Capitulino 		free_gigantic_page(page, huge_page_order(h));
1571944d9fecSLuiz Capitulino 	} else {
1572a5516438SAndi Kleen 		__free_pages(page, huge_page_order(h));
15736af2acb6SAdam Litke 	}
1574944d9fecSLuiz Capitulino }
15756af2acb6SAdam Litke 
1576b65d4adbSMuchun Song /*
1577b65d4adbSMuchun Song  * update_and_free_page() can be called from any context, so we cannot
1578b65d4adbSMuchun Song  * use GFP_KERNEL to allocate vmemmap pages. Instead, we defer the
1579b65d4adbSMuchun Song  * actual freeing to a workqueue so that the vmemmap pages do not have
1580b65d4adbSMuchun Song  * to be allocated with GFP_ATOMIC.
1581b65d4adbSMuchun Song  *
1582b65d4adbSMuchun Song  * free_hpage_workfn() locklessly retrieves the linked list of pages to be
1583b65d4adbSMuchun Song  * freed and frees them one-by-one. As the page->mapping pointer is going
1584b65d4adbSMuchun Song  * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1585b65d4adbSMuchun Song  * structure of a lockless linked list of huge pages to be freed.
1586b65d4adbSMuchun Song  */
1587b65d4adbSMuchun Song static LLIST_HEAD(hpage_freelist);
1588b65d4adbSMuchun Song 
1589b65d4adbSMuchun Song static void free_hpage_workfn(struct work_struct *work)
1590b65d4adbSMuchun Song {
1591b65d4adbSMuchun Song 	struct llist_node *node;
1592b65d4adbSMuchun Song 
1593b65d4adbSMuchun Song 	node = llist_del_all(&hpage_freelist);
1594b65d4adbSMuchun Song 
1595b65d4adbSMuchun Song 	while (node) {
1596b65d4adbSMuchun Song 		struct page *page;
1597b65d4adbSMuchun Song 		struct hstate *h;
1598b65d4adbSMuchun Song 
1599b65d4adbSMuchun Song 		page = container_of((struct address_space **)node,
1600b65d4adbSMuchun Song 				     struct page, mapping);
1601b65d4adbSMuchun Song 		node = node->next;
1602b65d4adbSMuchun Song 		page->mapping = NULL;
1603b65d4adbSMuchun Song 		/*
1604b65d4adbSMuchun Song 		 * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
1605b65d4adbSMuchun Song 		 * is going to trigger because a previous call to
1606b65d4adbSMuchun Song 		 * remove_hugetlb_page() will set_compound_page_dtor(page,
1607b65d4adbSMuchun Song 		 * NULL_COMPOUND_DTOR), so do not use page_hstate() directly.
1608b65d4adbSMuchun Song 		 */
1609b65d4adbSMuchun Song 		h = size_to_hstate(page_size(page));
1610b65d4adbSMuchun Song 
1611b65d4adbSMuchun Song 		__update_and_free_page(h, page);
1612b65d4adbSMuchun Song 
1613b65d4adbSMuchun Song 		cond_resched();
1614b65d4adbSMuchun Song 	}
1615b65d4adbSMuchun Song }
1616b65d4adbSMuchun Song static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1617b65d4adbSMuchun Song 
1618b65d4adbSMuchun Song static inline void flush_free_hpage_work(struct hstate *h)
1619b65d4adbSMuchun Song {
16205981611dSMuchun Song 	if (hugetlb_optimize_vmemmap_pages(h))
1621b65d4adbSMuchun Song 		flush_work(&free_hpage_work);
1622b65d4adbSMuchun Song }
1623b65d4adbSMuchun Song 
1624b65d4adbSMuchun Song static void update_and_free_page(struct hstate *h, struct page *page,
1625b65d4adbSMuchun Song 				 bool atomic)
1626b65d4adbSMuchun Song {
1627ad2fa371SMuchun Song 	if (!HPageVmemmapOptimized(page) || !atomic) {
1628b65d4adbSMuchun Song 		__update_and_free_page(h, page);
1629b65d4adbSMuchun Song 		return;
1630b65d4adbSMuchun Song 	}
1631b65d4adbSMuchun Song 
1632b65d4adbSMuchun Song 	/*
1633b65d4adbSMuchun Song 	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1634b65d4adbSMuchun Song 	 *
1635b65d4adbSMuchun Song 	 * Only call schedule_work() if hpage_freelist was previously
1636b65d4adbSMuchun Song 	 * empty. Otherwise, schedule_work() has already been called but
1637b65d4adbSMuchun Song 	 * the workfn hasn't retrieved the list yet.
1638b65d4adbSMuchun Song 	 */
1639b65d4adbSMuchun Song 	if (llist_add((struct llist_node *)&page->mapping, &hpage_freelist))
1640b65d4adbSMuchun Song 		schedule_work(&free_hpage_work);
1641b65d4adbSMuchun Song }
1642b65d4adbSMuchun Song 
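/* Free each huge page on @list in turn, rescheduling between pages. */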
164310c6ec49SMike Kravetz static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
164410c6ec49SMike Kravetz {
164510c6ec49SMike Kravetz 	struct page *page, *t_page;
164610c6ec49SMike Kravetz 
164710c6ec49SMike Kravetz 	list_for_each_entry_safe(page, t_page, list, lru) {
1648b65d4adbSMuchun Song 		update_and_free_page(h, page, false);
164910c6ec49SMike Kravetz 		cond_resched();
165010c6ec49SMike Kravetz 	}
165110c6ec49SMike Kravetz }
165210c6ec49SMike Kravetz 
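/* Return the hstate for the given huge page size, or NULL if there is none. */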
1653e5ff2159SAndi Kleen struct hstate *size_to_hstate(unsigned long size)
1654e5ff2159SAndi Kleen {
1655e5ff2159SAndi Kleen 	struct hstate *h;
1656e5ff2159SAndi Kleen 
1657e5ff2159SAndi Kleen 	for_each_hstate(h) {
1658e5ff2159SAndi Kleen 		if (huge_page_size(h) == size)
1659e5ff2159SAndi Kleen 			return h;
1660e5ff2159SAndi Kleen 	}
1661e5ff2159SAndi Kleen 	return NULL;
1662e5ff2159SAndi Kleen }
1663e5ff2159SAndi Kleen 
1664db71ef79SMike Kravetz void free_huge_page(struct page *page)
166527a85ef1SDavid Gibson {
1666a5516438SAndi Kleen 	/*
1667a5516438SAndi Kleen 	 * Can't pass hstate in here because it is called from the
1668a5516438SAndi Kleen 	 * compound page destructor.
1669a5516438SAndi Kleen 	 */
1670e5ff2159SAndi Kleen 	struct hstate *h = page_hstate(page);
16717893d1d5SAdam Litke 	int nid = page_to_nid(page);
1672d6995da3SMike Kravetz 	struct hugepage_subpool *spool = hugetlb_page_subpool(page);
167307443a85SJoonsoo Kim 	bool restore_reserve;
1674db71ef79SMike Kravetz 	unsigned long flags;
167527a85ef1SDavid Gibson 
1676b4330afbSMike Kravetz 	VM_BUG_ON_PAGE(page_count(page), page);
1677b4330afbSMike Kravetz 	VM_BUG_ON_PAGE(page_mapcount(page), page);
16788ace22bcSYongkai Wu 
1679d6995da3SMike Kravetz 	hugetlb_set_page_subpool(page, NULL);
168078fbe906SDavid Hildenbrand 	if (PageAnon(page))
168178fbe906SDavid Hildenbrand 		__ClearPageAnonExclusive(page);
16828ace22bcSYongkai Wu 	page->mapping = NULL;
1683d6995da3SMike Kravetz 	restore_reserve = HPageRestoreReserve(page);
1684d6995da3SMike Kravetz 	ClearHPageRestoreReserve(page);
168527a85ef1SDavid Gibson 
16861c5ecae3SMike Kravetz 	/*
1687d6995da3SMike Kravetz 	 * If HPageRestoreReserve was set on page, page allocation consumed a
16880919e1b6SMike Kravetz 	 * reservation.  If the page was associated with a subpool, there
16890919e1b6SMike Kravetz 	 * would have been a page reserved in the subpool before allocation
16900919e1b6SMike Kravetz 	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
16916c26d310SMiaohe Lin 	 * reservation, do not call hugepage_subpool_put_pages() as this will
16920919e1b6SMike Kravetz 	 * remove the reserved page from the subpool.
16930919e1b6SMike Kravetz 	 */
16940919e1b6SMike Kravetz 	if (!restore_reserve) {
16950919e1b6SMike Kravetz 		/*
16960919e1b6SMike Kravetz 		 * A return code of zero implies that the subpool will be
16970919e1b6SMike Kravetz 		 * under its minimum size if the reservation is not restored
16980919e1b6SMike Kravetz 		 * after the page is freed.  Therefore, force the
16990919e1b6SMike Kravetz 		 * restore_reserve operation.
17001c5ecae3SMike Kravetz 		 */
17011c5ecae3SMike Kravetz 		if (hugepage_subpool_put_pages(spool, 1) == 0)
17021c5ecae3SMike Kravetz 			restore_reserve = true;
17030919e1b6SMike Kravetz 	}
17041c5ecae3SMike Kravetz 
1705db71ef79SMike Kravetz 	spin_lock_irqsave(&hugetlb_lock, flags);
17068f251a3dSMike Kravetz 	ClearHPageMigratable(page);
17076d76dcf4SAneesh Kumar K.V 	hugetlb_cgroup_uncharge_page(hstate_index(h),
17086d76dcf4SAneesh Kumar K.V 				     pages_per_huge_page(h), page);
170908cf9fafSMina Almasry 	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
171008cf9fafSMina Almasry 					  pages_per_huge_page(h), page);
171107443a85SJoonsoo Kim 	if (restore_reserve)
171207443a85SJoonsoo Kim 		h->resv_huge_pages++;
171307443a85SJoonsoo Kim 
17149157c311SMike Kravetz 	if (HPageTemporary(page)) {
17156eb4e88aSMike Kravetz 		remove_hugetlb_page(h, page, false);
1716db71ef79SMike Kravetz 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1717b65d4adbSMuchun Song 		update_and_free_page(h, page, true);
1718ab5ac90aSMichal Hocko 	} else if (h->surplus_huge_pages_node[nid]) {
17190edaecfaSAneesh Kumar K.V 		/* remove the page from active list */
17206eb4e88aSMike Kravetz 		remove_hugetlb_page(h, page, true);
1721db71ef79SMike Kravetz 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1722b65d4adbSMuchun Song 		update_and_free_page(h, page, true);
17237893d1d5SAdam Litke 	} else {
17245d3a551cSWill Deacon 		arch_clear_hugepage_flags(page);
1725a5516438SAndi Kleen 		enqueue_huge_page(h, page);
1726db71ef79SMike Kravetz 		spin_unlock_irqrestore(&hugetlb_lock, flags);
172727a85ef1SDavid Gibson 	}
17281121828aSMike Kravetz }
172927a85ef1SDavid Gibson 
1730d3d99fccSOscar Salvador /*
1731d3d99fccSOscar Salvador  * Must be called with the hugetlb lock held
1732d3d99fccSOscar Salvador  */
1733d3d99fccSOscar Salvador static void __prep_account_new_huge_page(struct hstate *h, int nid)
1734d3d99fccSOscar Salvador {
1735d3d99fccSOscar Salvador 	lockdep_assert_held(&hugetlb_lock);
1736d3d99fccSOscar Salvador 	h->nr_huge_pages++;
1737d3d99fccSOscar Salvador 	h->nr_huge_pages_node[nid]++;
1738d3d99fccSOscar Salvador }
1739d3d99fccSOscar Salvador 
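/*
 * Turn a freshly allocated page into a hugetlb page: free its
 * optimizable vmemmap, install the hugetlb destructor and clear the
 * subpool and cgroup state.
 */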
1740f41f2ed4SMuchun Song static void __prep_new_huge_page(struct hstate *h, struct page *page)
1741b7ba30c6SAndi Kleen {
17425981611dSMuchun Song 	hugetlb_vmemmap_free(h, page);
17430edaecfaSAneesh Kumar K.V 	INIT_LIST_HEAD(&page->lru);
1744f1e61557SKirill A. Shutemov 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1745ff546117SMike Kravetz 	hugetlb_set_page_subpool(page, NULL);
17469dd540e2SAneesh Kumar K.V 	set_hugetlb_cgroup(page, NULL);
17471adc4d41SMina Almasry 	set_hugetlb_cgroup_rsvd(page, NULL);
1748d3d99fccSOscar Salvador }
1749d3d99fccSOscar Salvador 
1750d3d99fccSOscar Salvador static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1751d3d99fccSOscar Salvador {
1752f41f2ed4SMuchun Song 	__prep_new_huge_page(h, page);
1753db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
1754d3d99fccSOscar Salvador 	__prep_account_new_huge_page(h, nid);
1755db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
1756b7ba30c6SAndi Kleen }
1757b7ba30c6SAndi Kleen 
175834d9e35bSMike Kravetz static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
175934d9e35bSMike Kravetz 								bool demote)
176020a0307cSWu Fengguang {
17617118fc29SMike Kravetz 	int i, j;
176220a0307cSWu Fengguang 	int nr_pages = 1 << order;
176320a0307cSWu Fengguang 	struct page *p = page + 1;
176420a0307cSWu Fengguang 
176520a0307cSWu Fengguang 	/* we rely on prep_new_huge_page to set the destructor */
176620a0307cSWu Fengguang 	set_compound_order(page, order);
1767ef5a22beSAndrea Arcangeli 	__ClearPageReserved(page);
1768de09d31dSKirill A. Shutemov 	__SetPageHead(page);
176920a0307cSWu Fengguang 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1770ef5a22beSAndrea Arcangeli 		/*
1771ef5a22beSAndrea Arcangeli 		 * For gigantic hugepages allocated through bootmem at
1772ef5a22beSAndrea Arcangeli 		 * boot, it's safer to be consistent with the not-gigantic
1773ef5a22beSAndrea Arcangeli 		 * hugepages and clear the PG_reserved bit from all tail pages
17747c8de358SEthon Paul 		 * too.  Otherwise drivers using get_user_pages() to access tail
1775ef5a22beSAndrea Arcangeli 		 * pages may get the reference counting wrong if they see
1776ef5a22beSAndrea Arcangeli 		 * PG_reserved set on a tail page (despite the head page not
1777ef5a22beSAndrea Arcangeli 		 * having PG_reserved set).  Enforcing this consistency between
1778ef5a22beSAndrea Arcangeli 		 * head and tail pages allows drivers to optimize away a check
1779ef5a22beSAndrea Arcangeli 		 * on the head page when they need to know if put_page() is needed
1780ef5a22beSAndrea Arcangeli 		 * after get_user_pages().
1781ef5a22beSAndrea Arcangeli 		 */
1782ef5a22beSAndrea Arcangeli 		__ClearPageReserved(p);
17837118fc29SMike Kravetz 		/*
17847118fc29SMike Kravetz 		 * Subtle and very unlikely
17857118fc29SMike Kravetz 		 *
17867118fc29SMike Kravetz 		 * Gigantic 'page allocators' such as memblock or cma will
17877118fc29SMike Kravetz 		 * return a set of pages with each page ref counted.  We need
17887118fc29SMike Kravetz 		 * to turn this set of pages into a compound page with tail
17897118fc29SMike Kravetz 		 * page ref counts set to zero.  Code such as speculative page
17907118fc29SMike Kravetz 		 * cache adding could take a ref on a 'to be' tail page.
17917118fc29SMike Kravetz 		 * We need to respect any increased ref count, and only set
17927118fc29SMike Kravetz 		 * the ref count to zero if count is currently 1.  If count
1793416d85edSMike Kravetz 		 * is not 1, we return an error.  An error return indicates
1794416d85edSMike Kravetz 		 * the set of pages can not be converted to a gigantic page.
1795416d85edSMike Kravetz 		 * The caller who allocated the pages should then discard the
1796416d85edSMike Kravetz 		 * pages using the appropriate free interface.
179734d9e35bSMike Kravetz 		 *
179834d9e35bSMike Kravetz 		 * In the case of demote, the ref count will be zero.
17997118fc29SMike Kravetz 		 */
180034d9e35bSMike Kravetz 		if (!demote) {
18017118fc29SMike Kravetz 			if (!page_ref_freeze(p, 1)) {
1802416d85edSMike Kravetz 				pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
18037118fc29SMike Kravetz 				goto out_error;
18047118fc29SMike Kravetz 			}
180534d9e35bSMike Kravetz 		} else {
180634d9e35bSMike Kravetz 			VM_BUG_ON_PAGE(page_count(p), p);
180734d9e35bSMike Kravetz 		}
18081d798ca3SKirill A. Shutemov 		set_compound_head(p, page);
180920a0307cSWu Fengguang 	}
1810b4330afbSMike Kravetz 	atomic_set(compound_mapcount_ptr(page), -1);
181147e29d32SJohn Hubbard 	atomic_set(compound_pincount_ptr(page), 0);
18127118fc29SMike Kravetz 	return true;
18137118fc29SMike Kravetz 
18147118fc29SMike Kravetz out_error:
18157118fc29SMike Kravetz 	/* undo tail page modifications made above */
18167118fc29SMike Kravetz 	p = page + 1;
18177118fc29SMike Kravetz 	for (j = 1; j < i; j++, p = mem_map_next(p, page, j)) {
18187118fc29SMike Kravetz 		clear_compound_head(p);
18197118fc29SMike Kravetz 		set_page_refcounted(p);
18207118fc29SMike Kravetz 	}
18217118fc29SMike Kravetz 	/* need to clear PG_reserved on remaining tail pages  */
18227118fc29SMike Kravetz 	for (; j < nr_pages; j++, p = mem_map_next(p, page, j))
18237118fc29SMike Kravetz 		__ClearPageReserved(p);
18247118fc29SMike Kravetz 	set_compound_order(page, 0);
18255232c63fSMatthew Wilcox (Oracle) #ifdef CONFIG_64BIT
18267118fc29SMike Kravetz 	page[1].compound_nr = 0;
18275232c63fSMatthew Wilcox (Oracle) #endif
18287118fc29SMike Kravetz 	__ClearPageHead(page);
18297118fc29SMike Kravetz 	return false;
183020a0307cSWu Fengguang }
183120a0307cSWu Fengguang 
183234d9e35bSMike Kravetz static bool prep_compound_gigantic_page(struct page *page, unsigned int order)
183334d9e35bSMike Kravetz {
183434d9e35bSMike Kravetz 	return __prep_compound_gigantic_page(page, order, false);
183534d9e35bSMike Kravetz }
183634d9e35bSMike Kravetz 
18378531fc6fSMike Kravetz static bool prep_compound_gigantic_page_for_demote(struct page *page,
18388531fc6fSMike Kravetz 							unsigned int order)
18398531fc6fSMike Kravetz {
18408531fc6fSMike Kravetz 	return __prep_compound_gigantic_page(page, order, true);
18418531fc6fSMike Kravetz }
18428531fc6fSMike Kravetz 
18437795912cSAndrew Morton /*
18447795912cSAndrew Morton  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
18457795912cSAndrew Morton  * transparent huge pages.  See the PageTransHuge() documentation for more
18467795912cSAndrew Morton  * details.
18477795912cSAndrew Morton  */
184820a0307cSWu Fengguang int PageHuge(struct page *page)
184920a0307cSWu Fengguang {
185020a0307cSWu Fengguang 	if (!PageCompound(page))
185120a0307cSWu Fengguang 		return 0;
185220a0307cSWu Fengguang 
185320a0307cSWu Fengguang 	page = compound_head(page);
1854f1e61557SKirill A. Shutemov 	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
185520a0307cSWu Fengguang }
185643131e14SNaoya Horiguchi EXPORT_SYMBOL_GPL(PageHuge);
185743131e14SNaoya Horiguchi 
185827c73ae7SAndrea Arcangeli /*
185927c73ae7SAndrea Arcangeli  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
186027c73ae7SAndrea Arcangeli  * normal or transparent huge pages.
186127c73ae7SAndrea Arcangeli  */
186227c73ae7SAndrea Arcangeli int PageHeadHuge(struct page *page_head)
186327c73ae7SAndrea Arcangeli {
186427c73ae7SAndrea Arcangeli 	if (!PageHead(page_head))
186527c73ae7SAndrea Arcangeli 		return 0;
186627c73ae7SAndrea Arcangeli 
1867d4af73e3SVlastimil Babka 	return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
186827c73ae7SAndrea Arcangeli }
18694e936eccSDavid Howells EXPORT_SYMBOL_GPL(PageHeadHuge);
187027c73ae7SAndrea Arcangeli 
1871c0d0381aSMike Kravetz /*
1872c0d0381aSMike Kravetz  * Find and lock address space (mapping) in write mode.
1873c0d0381aSMike Kravetz  *
1874336bf30eSMike Kravetz  * Upon entry, the page is locked which means that page_mapping() is
1875336bf30eSMike Kravetz  * stable.  Due to locking order, we can only trylock_write.  If we can
1876336bf30eSMike Kravetz  * not get the lock, simply return NULL to caller.
1877c0d0381aSMike Kravetz  */
1878c0d0381aSMike Kravetz struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
1879c0d0381aSMike Kravetz {
1880336bf30eSMike Kravetz 	struct address_space *mapping = page_mapping(hpage);
1881c0d0381aSMike Kravetz 
1882c0d0381aSMike Kravetz 	if (!mapping)
1883c0d0381aSMike Kravetz 		return mapping;
1884c0d0381aSMike Kravetz 
1885c0d0381aSMike Kravetz 	if (i_mmap_trylock_write(mapping))
1886c0d0381aSMike Kravetz 		return mapping;
1887c0d0381aSMike Kravetz 
1888c0d0381aSMike Kravetz 	return NULL;
1889c0d0381aSMike Kravetz }
1890c0d0381aSMike Kravetz 
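/*
 * Return the index of @page within its mapping in units of base
 * (PAGE_SIZE) pages, derived from the head page's index and the page's
 * offset inside the compound page.
 */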
1891fe19bd3dSHugh Dickins pgoff_t hugetlb_basepage_index(struct page *page)
189213d60f4bSZhang Yi {
189313d60f4bSZhang Yi 	struct page *page_head = compound_head(page);
189413d60f4bSZhang Yi 	pgoff_t index = page_index(page_head);
189513d60f4bSZhang Yi 	unsigned long compound_idx;
189613d60f4bSZhang Yi 
189713d60f4bSZhang Yi 	if (compound_order(page_head) >= MAX_ORDER)
189813d60f4bSZhang Yi 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
189913d60f4bSZhang Yi 	else
190013d60f4bSZhang Yi 		compound_idx = page - page_head;
190113d60f4bSZhang Yi 
190213d60f4bSZhang Yi 	return (index << compound_order(page_head)) + compound_idx;
190313d60f4bSZhang Yi }
190413d60f4bSZhang Yi 
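/*
 * Allocate a huge page straight from the buddy allocator for the given
 * node/nodemask.  The optional @node_alloc_noretry bitmap records nodes
 * where a recent 'try hard' allocation failed, so that subsequent
 * attempts there avoid __GFP_RETRY_MAYFAIL until the state changes.
 */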
19050c397daeSMichal Hocko static struct page *alloc_buddy_huge_page(struct hstate *h,
1906f60858f9SMike Kravetz 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1907f60858f9SMike Kravetz 		nodemask_t *node_alloc_noretry)
19081da177e4SLinus Torvalds {
1909af0fb9dfSMichal Hocko 	int order = huge_page_order(h);
19101da177e4SLinus Torvalds 	struct page *page;
1911f60858f9SMike Kravetz 	bool alloc_try_hard = true;
1912f96efd58SJoe Jin 
1913f60858f9SMike Kravetz 	/*
1914f60858f9SMike Kravetz 	 * By default we always try hard to allocate the page with
1915f60858f9SMike Kravetz 	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
1916f60858f9SMike Kravetz 	 * a loop (to adjust global huge page counts) and previous allocation
1917f60858f9SMike Kravetz 	 * failed, do not continue to try hard on the same node.  Use the
1918f60858f9SMike Kravetz 	 * node_alloc_noretry bitmap to manage this state information.
1919f60858f9SMike Kravetz 	 */
1920f60858f9SMike Kravetz 	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1921f60858f9SMike Kravetz 		alloc_try_hard = false;
1922f60858f9SMike Kravetz 	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1923f60858f9SMike Kravetz 	if (alloc_try_hard)
1924f60858f9SMike Kravetz 		gfp_mask |= __GFP_RETRY_MAYFAIL;
1925af0fb9dfSMichal Hocko 	if (nid == NUMA_NO_NODE)
1926af0fb9dfSMichal Hocko 		nid = numa_mem_id();
192784172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp_mask, order, nid, nmask);
1928af0fb9dfSMichal Hocko 	if (page)
1929af0fb9dfSMichal Hocko 		__count_vm_event(HTLB_BUDDY_PGALLOC);
1930af0fb9dfSMichal Hocko 	else
1931af0fb9dfSMichal Hocko 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
193263b4613cSNishanth Aravamudan 
1933f60858f9SMike Kravetz 	/*
1934f60858f9SMike Kravetz 	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page, this
1935f60858f9SMike Kravetz 	 * indicates an overall state change.  Clear the bit so that we resume
1936f60858f9SMike Kravetz 	 * normal 'try hard' allocations.
1937f60858f9SMike Kravetz 	 */
1938f60858f9SMike Kravetz 	if (node_alloc_noretry && page && !alloc_try_hard)
1939f60858f9SMike Kravetz 		node_clear(nid, *node_alloc_noretry);
1940f60858f9SMike Kravetz 
1941f60858f9SMike Kravetz 	/*
1942f60858f9SMike Kravetz 	 * If we tried hard to get a page but failed, set bit so that
1943f60858f9SMike Kravetz 	 * subsequent attempts will not try as hard until there is an
1944f60858f9SMike Kravetz 	 * overall state change.
1945f60858f9SMike Kravetz 	 */
1946f60858f9SMike Kravetz 	if (node_alloc_noretry && !page && alloc_try_hard)
1947f60858f9SMike Kravetz 		node_set(nid, *node_alloc_noretry);
1948f60858f9SMike Kravetz 
194963b4613cSNishanth Aravamudan 	return page;
195063b4613cSNishanth Aravamudan }
195163b4613cSNishanth Aravamudan 
1952af0fb9dfSMichal Hocko /*
19530c397daeSMichal Hocko  * Common helper to allocate a fresh hugetlb page. All specific allocators
19540c397daeSMichal Hocko  * should use this function to get new hugetlb pages.
19550c397daeSMichal Hocko  */
19560c397daeSMichal Hocko static struct page *alloc_fresh_huge_page(struct hstate *h,
1957f60858f9SMike Kravetz 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1958f60858f9SMike Kravetz 		nodemask_t *node_alloc_noretry)
19590c397daeSMichal Hocko {
19600c397daeSMichal Hocko 	struct page *page;
19617118fc29SMike Kravetz 	bool retry = false;
19620c397daeSMichal Hocko 
19637118fc29SMike Kravetz retry:
19640c397daeSMichal Hocko 	if (hstate_is_gigantic(h))
19650c397daeSMichal Hocko 		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
19660c397daeSMichal Hocko 	else
19670c397daeSMichal Hocko 		page = alloc_buddy_huge_page(h, gfp_mask,
1968f60858f9SMike Kravetz 				nid, nmask, node_alloc_noretry);
19690c397daeSMichal Hocko 	if (!page)
19700c397daeSMichal Hocko 		return NULL;
19710c397daeSMichal Hocko 
19727118fc29SMike Kravetz 	if (hstate_is_gigantic(h)) {
19737118fc29SMike Kravetz 		if (!prep_compound_gigantic_page(page, huge_page_order(h))) {
19747118fc29SMike Kravetz 			/*
19757118fc29SMike Kravetz 			 * Rare failure to convert pages to compound page.
19767118fc29SMike Kravetz 			 * Free pages and try again - ONCE!
19777118fc29SMike Kravetz 			 */
19787118fc29SMike Kravetz 			free_gigantic_page(page, huge_page_order(h));
19797118fc29SMike Kravetz 			if (!retry) {
19807118fc29SMike Kravetz 				retry = true;
19817118fc29SMike Kravetz 				goto retry;
19827118fc29SMike Kravetz 			}
19837118fc29SMike Kravetz 			return NULL;
19847118fc29SMike Kravetz 		}
19857118fc29SMike Kravetz 	}
19860c397daeSMichal Hocko 	prep_new_huge_page(h, page, page_to_nid(page));
19870c397daeSMichal Hocko 
19880c397daeSMichal Hocko 	return page;
19890c397daeSMichal Hocko }
19900c397daeSMichal Hocko 
19910c397daeSMichal Hocko /*
1992af0fb9dfSMichal Hocko  * Allocates a fresh page to the hugetlb allocator pool in a
1993af0fb9dfSMichal Hocko  * node-interleaved manner.
1994af0fb9dfSMichal Hocko  */
1995f60858f9SMike Kravetz static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1996f60858f9SMike Kravetz 				nodemask_t *node_alloc_noretry)
1997b2261026SJoonsoo Kim {
1998b2261026SJoonsoo Kim 	struct page *page;
1999b2261026SJoonsoo Kim 	int nr_nodes, node;
2000af0fb9dfSMichal Hocko 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2001b2261026SJoonsoo Kim 
2002b2261026SJoonsoo Kim 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2003f60858f9SMike Kravetz 		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
2004f60858f9SMike Kravetz 						node_alloc_noretry);
2005af0fb9dfSMichal Hocko 		if (page)
2006b2261026SJoonsoo Kim 			break;
2007b2261026SJoonsoo Kim 	}
2008b2261026SJoonsoo Kim 
2009af0fb9dfSMichal Hocko 	if (!page)
2010af0fb9dfSMichal Hocko 		return 0;
2011b2261026SJoonsoo Kim 
2012af0fb9dfSMichal Hocko 	put_page(page); /* free it into the hugepage allocator */
2013af0fb9dfSMichal Hocko 
2014af0fb9dfSMichal Hocko 	return 1;
2015b2261026SJoonsoo Kim }
2016b2261026SJoonsoo Kim 
2017e8c5c824SLee Schermerhorn /*
201810c6ec49SMike Kravetz  * Remove a huge page from the pool, taking it from the next node to free.
201910c6ec49SMike Kravetz  * Attempt to keep persistent huge pages more or less balanced over allowed nodes.
202010c6ec49SMike Kravetz  * This routine only 'removes' the hugetlb page.  The caller must make
202110c6ec49SMike Kravetz  * an additional call to free the page to low level allocators.
2022e8c5c824SLee Schermerhorn  * Called with hugetlb_lock locked.
2023e8c5c824SLee Schermerhorn  */
202410c6ec49SMike Kravetz static struct page *remove_pool_huge_page(struct hstate *h,
202510c6ec49SMike Kravetz 						nodemask_t *nodes_allowed,
20266ae11b27SLee Schermerhorn 						 bool acct_surplus)
2027e8c5c824SLee Schermerhorn {
2028b2261026SJoonsoo Kim 	int nr_nodes, node;
202910c6ec49SMike Kravetz 	struct page *page = NULL;
2030e8c5c824SLee Schermerhorn 
20319487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
2032b2261026SJoonsoo Kim 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2033685f3457SLee Schermerhorn 		/*
2034685f3457SLee Schermerhorn 		 * If we're returning unused surplus pages, only examine
2035685f3457SLee Schermerhorn 		 * nodes with surplus pages.
2036685f3457SLee Schermerhorn 		 */
2037b2261026SJoonsoo Kim 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2038b2261026SJoonsoo Kim 		    !list_empty(&h->hugepage_freelists[node])) {
203910c6ec49SMike Kravetz 			page = list_entry(h->hugepage_freelists[node].next,
2040e8c5c824SLee Schermerhorn 					  struct page, lru);
20416eb4e88aSMike Kravetz 			remove_hugetlb_page(h, page, acct_surplus);
20429a76db09SLee Schermerhorn 			break;
2043e8c5c824SLee Schermerhorn 		}
2044b2261026SJoonsoo Kim 	}
2045e8c5c824SLee Schermerhorn 
204610c6ec49SMike Kravetz 	return page;
2047e8c5c824SLee Schermerhorn }
2048e8c5c824SLee Schermerhorn 
2049c8721bbbSNaoya Horiguchi /*
2050c8721bbbSNaoya Horiguchi  * Dissolve a given free hugepage into free buddy pages. This function does
2051faf53defSNaoya Horiguchi  * nothing for in-use hugepages and non-hugepages.
2052faf53defSNaoya Horiguchi  * This function returns one of the following values:
2053faf53defSNaoya Horiguchi  *
2054ad2fa371SMuchun Song  *  -ENOMEM: failed to allocate the vmemmap pages needed to free the hugepage
2055ad2fa371SMuchun Song  *           when the system is under memory pressure and the feature of
2056ad2fa371SMuchun Song  *           freeing unused vmemmap pages associated with each hugetlb page
2057ad2fa371SMuchun Song  *           is enabled.
2058faf53defSNaoya Horiguchi  *  -EBUSY:  failed to dissolve free hugepages or the hugepage is in-use
2059faf53defSNaoya Horiguchi  *           (allocated or reserved).
2060faf53defSNaoya Horiguchi  *       0:  successfully dissolved free hugepages or the page is not a
2061faf53defSNaoya Horiguchi  *           hugepage (considered as already dissolved)
2062c8721bbbSNaoya Horiguchi  */
2063c3114a84SAnshuman Khandual int dissolve_free_huge_page(struct page *page)
2064c8721bbbSNaoya Horiguchi {
20656bc9b564SNaoya Horiguchi 	int rc = -EBUSY;
2066082d5b6bSGerald Schaefer 
20677ffddd49SMuchun Song retry:
2068faf53defSNaoya Horiguchi 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
2069faf53defSNaoya Horiguchi 	if (!PageHuge(page))
2070faf53defSNaoya Horiguchi 		return 0;
2071faf53defSNaoya Horiguchi 
2072db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
2073faf53defSNaoya Horiguchi 	if (!PageHuge(page)) {
2074faf53defSNaoya Horiguchi 		rc = 0;
2075faf53defSNaoya Horiguchi 		goto out;
2076faf53defSNaoya Horiguchi 	}
2077faf53defSNaoya Horiguchi 
2078faf53defSNaoya Horiguchi 	if (!page_count(page)) {
20792247bb33SGerald Schaefer 		struct page *head = compound_head(page);
20802247bb33SGerald Schaefer 		struct hstate *h = page_hstate(head);
20816bc9b564SNaoya Horiguchi 		if (h->free_huge_pages - h->resv_huge_pages == 0)
2082082d5b6bSGerald Schaefer 			goto out;
20837ffddd49SMuchun Song 
20847ffddd49SMuchun Song 		/*
20857ffddd49SMuchun Song 		 * We should make sure that the page is already on the free list
20867ffddd49SMuchun Song 		 * when it is dissolved.
20877ffddd49SMuchun Song 		 */
20886c037149SMike Kravetz 		if (unlikely(!HPageFreed(head))) {
2089db71ef79SMike Kravetz 			spin_unlock_irq(&hugetlb_lock);
20907ffddd49SMuchun Song 			cond_resched();
20917ffddd49SMuchun Song 
20927ffddd49SMuchun Song 			/*
20937ffddd49SMuchun Song 			 * Theoretically, we should return -EBUSY when we
20947ffddd49SMuchun Song 			 * encounter this race. In fact, we have a chance
20957ffddd49SMuchun Song 			 * to successfully dissolve the page if we retry,
20967ffddd49SMuchun Song 			 * because the race window is quite small. Seizing
20977ffddd49SMuchun Song 			 * this opportunity is an optimization that increases
20987ffddd49SMuchun Song 			 * the success rate of dissolving the page.
20997ffddd49SMuchun Song 			 */
21007ffddd49SMuchun Song 			goto retry;
21017ffddd49SMuchun Song 		}
21027ffddd49SMuchun Song 
2103ad2fa371SMuchun Song 		remove_hugetlb_page(h, head, false);
2104ad2fa371SMuchun Song 		h->max_huge_pages--;
2105ad2fa371SMuchun Song 		spin_unlock_irq(&hugetlb_lock);
2106ad2fa371SMuchun Song 
2107c3114a84SAnshuman Khandual 		/*
2108ad2fa371SMuchun Song 		 * Normally update_and_free_page will allocate required vmemmap
2109ad2fa371SMuchun Song 		 * before freeing the page.  update_and_free_page will fail to
2110ad2fa371SMuchun Song 		 * free the page if it cannot allocate required vmemmap.  We
2111ad2fa371SMuchun Song 		 * need to adjust max_huge_pages if the page is not freed.
2112ad2fa371SMuchun Song 		 * Attempt to allocate vmemmap here so that we can take
2113ad2fa371SMuchun Song 		 * appropriate action on failure.
2114ad2fa371SMuchun Song 		 */
21155981611dSMuchun Song 		rc = hugetlb_vmemmap_alloc(h, head);
2116ad2fa371SMuchun Song 		if (!rc) {
2117ad2fa371SMuchun Song 			/*
2118ad2fa371SMuchun Song 			 * Move PageHWPoison flag from head page to the raw
2119ad2fa371SMuchun Song 			 * error page, which makes the subpages other than
2120ad2fa371SMuchun Song 			 * the error page reusable.
2121c3114a84SAnshuman Khandual 			 */
2122c3114a84SAnshuman Khandual 			if (PageHWPoison(head) && page != head) {
2123c3114a84SAnshuman Khandual 				SetPageHWPoison(page);
2124c3114a84SAnshuman Khandual 				ClearPageHWPoison(head);
2125c3114a84SAnshuman Khandual 			}
2126b65d4adbSMuchun Song 			update_and_free_page(h, head, false);
2127ad2fa371SMuchun Song 		} else {
2128ad2fa371SMuchun Song 			spin_lock_irq(&hugetlb_lock);
2129ad2fa371SMuchun Song 			add_hugetlb_page(h, head, false);
2130ad2fa371SMuchun Song 			h->max_huge_pages++;
2131ad2fa371SMuchun Song 			spin_unlock_irq(&hugetlb_lock);
2132ad2fa371SMuchun Song 		}
2133ad2fa371SMuchun Song 
2134ad2fa371SMuchun Song 		return rc;
2135c8721bbbSNaoya Horiguchi 	}
2136082d5b6bSGerald Schaefer out:
2137db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
2138082d5b6bSGerald Schaefer 	return rc;
2139c8721bbbSNaoya Horiguchi }
2140c8721bbbSNaoya Horiguchi 
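/*
 * A minimal sketch of how the return values documented above could be
 * handled by a caller; try_dissolve_example() and its retry policy are
 * hypothetical and only illustrate the -EBUSY/-ENOMEM/0 convention.
 */
static int try_dissolve_example(struct page *page)
{
	int rc = dissolve_free_huge_page(page);

	if (rc == -EBUSY)
		return rc;	/* in-use (allocated or reserved); may be retried later */
	if (rc == -ENOMEM)
		return rc;	/* vmemmap pages could not be allocated under pressure */
	return 0;		/* dissolved, or the page was not a hugepage */
}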
2141c8721bbbSNaoya Horiguchi /*
2142c8721bbbSNaoya Horiguchi  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2143c8721bbbSNaoya Horiguchi  * make specified memory blocks removable from the system.
21442247bb33SGerald Schaefer  * Note that this will dissolve a free gigantic hugepage completely, if any
21452247bb33SGerald Schaefer  * part of it lies within the given range.
2146082d5b6bSGerald Schaefer  * Also note that if dissolve_free_huge_page() returns with an error, all
2147082d5b6bSGerald Schaefer  * free hugepages that were dissolved before that error are lost.
2148c8721bbbSNaoya Horiguchi  */
2149082d5b6bSGerald Schaefer int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
2150c8721bbbSNaoya Horiguchi {
2151c8721bbbSNaoya Horiguchi 	unsigned long pfn;
2152eb03aa00SGerald Schaefer 	struct page *page;
2153082d5b6bSGerald Schaefer 	int rc = 0;
2154c8721bbbSNaoya Horiguchi 
2155d0177639SLi Zhong 	if (!hugepages_supported())
2156082d5b6bSGerald Schaefer 		return rc;
2157d0177639SLi Zhong 
2158eb03aa00SGerald Schaefer 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
2159eb03aa00SGerald Schaefer 		page = pfn_to_page(pfn);
2160eb03aa00SGerald Schaefer 		rc = dissolve_free_huge_page(page);
2161eb03aa00SGerald Schaefer 		if (rc)
2162082d5b6bSGerald Schaefer 			break;
2163eb03aa00SGerald Schaefer 	}
2164082d5b6bSGerald Schaefer 
2165082d5b6bSGerald Schaefer 	return rc;
2166c8721bbbSNaoya Horiguchi }
2167c8721bbbSNaoya Horiguchi 
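/*
 * A minimal caller sketch, assuming a memory-offline style path that
 * must dissolve any free huge pages overlapping the range before it can
 * proceed; offline_range_example() and its parameters are hypothetical.
 */
static int offline_range_example(unsigned long start_pfn, unsigned long nr_pages)
{
	int rc;

	/* Dissolve free huge pages first so the range only holds buddy pages. */
	rc = dissolve_free_huge_pages(start_pfn, start_pfn + nr_pages);
	if (rc)
		return rc;	/* -EBUSY or -ENOMEM: the range cannot be emptied now */

	/* ... migrate or isolate the remaining pages here ... */
	return 0;
}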
2168ab5ac90aSMichal Hocko /*
2169ab5ac90aSMichal Hocko  * Allocates a fresh surplus page from the page allocator.
2170ab5ac90aSMichal Hocko  */
21710c397daeSMichal Hocko static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
2172b65a4edaSMike Kravetz 		int nid, nodemask_t *nmask, bool zero_ref)
21737893d1d5SAdam Litke {
21749980d744SMichal Hocko 	struct page *page = NULL;
2175b65a4edaSMike Kravetz 	bool retry = false;
21767893d1d5SAdam Litke 
2177bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
2178aa888a74SAndi Kleen 		return NULL;
2179aa888a74SAndi Kleen 
2180db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
21819980d744SMichal Hocko 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
21829980d744SMichal Hocko 		goto out_unlock;
2183db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
2184d1c3fb1fSNishanth Aravamudan 
2185b65a4edaSMike Kravetz retry:
2186f60858f9SMike Kravetz 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
21879980d744SMichal Hocko 	if (!page)
21880c397daeSMichal Hocko 		return NULL;
2189d1c3fb1fSNishanth Aravamudan 
2190db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
21919980d744SMichal Hocko 	/*
21929980d744SMichal Hocko 	 * We could have raced with the pool size change.
21939980d744SMichal Hocko 	 * Double check that and simply deallocate the new page
21949980d744SMichal Hocko 	 * if we would end up overcommitting the surpluses. Abuse
21959980d744SMichal Hocko 	 * the temporary page flag to work around the nasty free_huge_page
21969980d744SMichal Hocko 	 * code flow.
21979980d744SMichal Hocko 	 */
21989980d744SMichal Hocko 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
21999157c311SMike Kravetz 		SetHPageTemporary(page);
2200db71ef79SMike Kravetz 		spin_unlock_irq(&hugetlb_lock);
22019980d744SMichal Hocko 		put_page(page);
22022bf753e6SKai Shen 		return NULL;
2203b65a4edaSMike Kravetz 	}
2204b65a4edaSMike Kravetz 
2205b65a4edaSMike Kravetz 	if (zero_ref) {
2206b65a4edaSMike Kravetz 		/*
2207b65a4edaSMike Kravetz 		 * Caller requires a page with zero ref count.
2208b65a4edaSMike Kravetz 		 * We will drop ref count here.  If someone else is holding
2209b65a4edaSMike Kravetz 		 * a ref, the page will be freed when they drop it.  Abuse
2210b65a4edaSMike Kravetz 		 * temporary page flag to accomplish this.
2211b65a4edaSMike Kravetz 		 */
2212b65a4edaSMike Kravetz 		SetHPageTemporary(page);
2213b65a4edaSMike Kravetz 		if (!put_page_testzero(page)) {
2214b65a4edaSMike Kravetz 			/*
2215b65a4edaSMike Kravetz 			 * Unexpected inflated ref count on freshly allocated
2216b65a4edaSMike Kravetz 			 * huge page.  Retry once.
2217b65a4edaSMike Kravetz 			 */
2218b65a4edaSMike Kravetz 			pr_info("HugeTLB unexpected inflated ref count on freshly allocated page\n");
2219b65a4edaSMike Kravetz 			spin_unlock_irq(&hugetlb_lock);
2220b65a4edaSMike Kravetz 			if (retry)
2221b65a4edaSMike Kravetz 				return NULL;
2222b65a4edaSMike Kravetz 
2223b65a4edaSMike Kravetz 			retry = true;
2224b65a4edaSMike Kravetz 			goto retry;
2225b65a4edaSMike Kravetz 		}
2226b65a4edaSMike Kravetz 		ClearHPageTemporary(page);
2227b65a4edaSMike Kravetz 	}
2228b65a4edaSMike Kravetz 
22299980d744SMichal Hocko 	h->surplus_huge_pages++;
22304704dea3SMichal Hocko 	h->surplus_huge_pages_node[page_to_nid(page)]++;
22319980d744SMichal Hocko 
22329980d744SMichal Hocko out_unlock:
2233db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
22347893d1d5SAdam Litke 
22357893d1d5SAdam Litke 	return page;
22367893d1d5SAdam Litke }
22377893d1d5SAdam Litke 
2238bbe88753SJoonsoo Kim static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
2239ab5ac90aSMichal Hocko 				     int nid, nodemask_t *nmask)
2240ab5ac90aSMichal Hocko {
2241ab5ac90aSMichal Hocko 	struct page *page;
2242ab5ac90aSMichal Hocko 
2243ab5ac90aSMichal Hocko 	if (hstate_is_gigantic(h))
2244ab5ac90aSMichal Hocko 		return NULL;
2245ab5ac90aSMichal Hocko 
2246f60858f9SMike Kravetz 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
2247ab5ac90aSMichal Hocko 	if (!page)
2248ab5ac90aSMichal Hocko 		return NULL;
2249ab5ac90aSMichal Hocko 
2250ab5ac90aSMichal Hocko 	/*
2251ab5ac90aSMichal Hocko 	 * We do not account these pages as surplus because they are only
2252ab5ac90aSMichal Hocko 	 * temporary and will be released properly on the last reference
2253ab5ac90aSMichal Hocko 	 */
22549157c311SMike Kravetz 	SetHPageTemporary(page);
2255ab5ac90aSMichal Hocko 
2256ab5ac90aSMichal Hocko 	return page;
2257ab5ac90aSMichal Hocko }
2258ab5ac90aSMichal Hocko 
2259e4e574b7SAdam Litke /*
2260099730d6SDave Hansen  * Use the VMA's mpolicy to allocate a huge page from the buddy.
2261099730d6SDave Hansen  */
2262e0ec90eeSDave Hansen static
22630c397daeSMichal Hocko struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
2264099730d6SDave Hansen 		struct vm_area_struct *vma, unsigned long addr)
2265099730d6SDave Hansen {
2266cfcaa66fSBen Widawsky 	struct page *page = NULL;
2267aaf14e40SMichal Hocko 	struct mempolicy *mpol;
2268aaf14e40SMichal Hocko 	gfp_t gfp_mask = htlb_alloc_mask(h);
2269aaf14e40SMichal Hocko 	int nid;
2270aaf14e40SMichal Hocko 	nodemask_t *nodemask;
2271aaf14e40SMichal Hocko 
2272aaf14e40SMichal Hocko 	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2273cfcaa66fSBen Widawsky 	if (mpol_is_preferred_many(mpol)) {
2274cfcaa66fSBen Widawsky 		gfp_t gfp = gfp_mask | __GFP_NOWARN;
2275cfcaa66fSBen Widawsky 
2276cfcaa66fSBen Widawsky 		gfp &=  ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2277cfcaa66fSBen Widawsky 		page = alloc_surplus_huge_page(h, gfp, nid, nodemask, false);
2278cfcaa66fSBen Widawsky 
2279cfcaa66fSBen Widawsky 		/* Fallback to all nodes if page==NULL */
2280cfcaa66fSBen Widawsky 		nodemask = NULL;
2281cfcaa66fSBen Widawsky 	}
2282cfcaa66fSBen Widawsky 
2283cfcaa66fSBen Widawsky 	if (!page)
2284b65a4edaSMike Kravetz 		page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false);
2285aaf14e40SMichal Hocko 	mpol_cond_put(mpol);
2286aaf14e40SMichal Hocko 	return page;
2287099730d6SDave Hansen }
2288099730d6SDave Hansen 
2289ab5ac90aSMichal Hocko /* page migration callback function */
22903e59fcb0SMichal Hocko struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
2291d92bbc27SJoonsoo Kim 		nodemask_t *nmask, gfp_t gfp_mask)
22924db9b2efSMichal Hocko {
2293db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
22944db9b2efSMichal Hocko 	if (h->free_huge_pages - h->resv_huge_pages > 0) {
22953e59fcb0SMichal Hocko 		struct page *page;
22963e59fcb0SMichal Hocko 
22973e59fcb0SMichal Hocko 		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
22983e59fcb0SMichal Hocko 		if (page) {
2299db71ef79SMike Kravetz 			spin_unlock_irq(&hugetlb_lock);
23003e59fcb0SMichal Hocko 			return page;
23014db9b2efSMichal Hocko 		}
23024db9b2efSMichal Hocko 	}
2303db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
23044db9b2efSMichal Hocko 
23050c397daeSMichal Hocko 	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
23064db9b2efSMichal Hocko }
23074db9b2efSMichal Hocko 
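/*
 * A minimal sketch of the migration-callback usage described above:
 * pick an allocation mask for the hstate and ask for a huge page near
 * @preferred_nid, falling back to @nmask.  alloc_migration_dst_example()
 * is hypothetical.
 */
static struct page *alloc_migration_dst_example(struct hstate *h,
		int preferred_nid, nodemask_t *nmask)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);

	/* Free pool pages are preferred; otherwise a temporary page is used. */
	return alloc_huge_page_nodemask(h, preferred_nid, nmask, gfp_mask);
}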
2308ebd63723SMichal Hocko /* mempolicy aware migration callback */
2309389c8178SMichal Hocko struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
2310389c8178SMichal Hocko 		unsigned long address)
2311ebd63723SMichal Hocko {
2312ebd63723SMichal Hocko 	struct mempolicy *mpol;
2313ebd63723SMichal Hocko 	nodemask_t *nodemask;
2314ebd63723SMichal Hocko 	struct page *page;
2315ebd63723SMichal Hocko 	gfp_t gfp_mask;
2316ebd63723SMichal Hocko 	int node;
2317ebd63723SMichal Hocko 
2318ebd63723SMichal Hocko 	gfp_mask = htlb_alloc_mask(h);
2319ebd63723SMichal Hocko 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
2320d92bbc27SJoonsoo Kim 	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
2321ebd63723SMichal Hocko 	mpol_cond_put(mpol);
2322ebd63723SMichal Hocko 
2323ebd63723SMichal Hocko 	return page;
2324ebd63723SMichal Hocko }
2325ebd63723SMichal Hocko 
2326bf50bab2SNaoya Horiguchi /*
232725985edcSLucas De Marchi  * Increase the hugetlb pool such that it can accommodate a reservation
2328e4e574b7SAdam Litke  * of size 'delta'.
2329e4e574b7SAdam Litke  */
23300a4f3d1bSLiu Xiang static int gather_surplus_pages(struct hstate *h, long delta)
23311b2a1e7bSJules Irenge 	__must_hold(&hugetlb_lock)
2332e4e574b7SAdam Litke {
2333e4e574b7SAdam Litke 	struct list_head surplus_list;
2334e4e574b7SAdam Litke 	struct page *page, *tmp;
23350a4f3d1bSLiu Xiang 	int ret;
23360a4f3d1bSLiu Xiang 	long i;
23370a4f3d1bSLiu Xiang 	long needed, allocated;
233828073b02SHillf Danton 	bool alloc_ok = true;
2339e4e574b7SAdam Litke 
23409487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
2341a5516438SAndi Kleen 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2342ac09b3a1SAdam Litke 	if (needed <= 0) {
2343a5516438SAndi Kleen 		h->resv_huge_pages += delta;
2344e4e574b7SAdam Litke 		return 0;
2345ac09b3a1SAdam Litke 	}
2346e4e574b7SAdam Litke 
2347e4e574b7SAdam Litke 	allocated = 0;
2348e4e574b7SAdam Litke 	INIT_LIST_HEAD(&surplus_list);
2349e4e574b7SAdam Litke 
2350e4e574b7SAdam Litke 	ret = -ENOMEM;
2351e4e574b7SAdam Litke retry:
2352db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
2353e4e574b7SAdam Litke 	for (i = 0; i < needed; i++) {
23540c397daeSMichal Hocko 		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
2355b65a4edaSMike Kravetz 				NUMA_NO_NODE, NULL, true);
235628073b02SHillf Danton 		if (!page) {
235728073b02SHillf Danton 			alloc_ok = false;
235828073b02SHillf Danton 			break;
235928073b02SHillf Danton 		}
2360e4e574b7SAdam Litke 		list_add(&page->lru, &surplus_list);
236169ed779aSDavid Rientjes 		cond_resched();
2362e4e574b7SAdam Litke 	}
236328073b02SHillf Danton 	allocated += i;
2364e4e574b7SAdam Litke 
2365e4e574b7SAdam Litke 	/*
2366e4e574b7SAdam Litke 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
2367e4e574b7SAdam Litke 	 * because either resv_huge_pages or free_huge_pages may have changed.
2368e4e574b7SAdam Litke 	 */
2369db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
2370a5516438SAndi Kleen 	needed = (h->resv_huge_pages + delta) -
2371a5516438SAndi Kleen 			(h->free_huge_pages + allocated);
237228073b02SHillf Danton 	if (needed > 0) {
237328073b02SHillf Danton 		if (alloc_ok)
2374e4e574b7SAdam Litke 			goto retry;
237528073b02SHillf Danton 		/*
237628073b02SHillf Danton 		 * We were not able to allocate enough pages to
237728073b02SHillf Danton 		 * satisfy the entire reservation so we free what
237828073b02SHillf Danton 		 * we've allocated so far.
237928073b02SHillf Danton 		 */
238028073b02SHillf Danton 		goto free;
238128073b02SHillf Danton 	}
2382e4e574b7SAdam Litke 	/*
2383e4e574b7SAdam Litke 	 * The surplus_list now contains _at_least_ the number of extra pages
238425985edcSLucas De Marchi 	 * needed to accommodate the reservation.  Add the appropriate number
2385e4e574b7SAdam Litke 	 * of pages to the hugetlb pool and free the extras back to the buddy
2386ac09b3a1SAdam Litke 	 * allocator.  Commit the entire reservation here to prevent another
2387ac09b3a1SAdam Litke 	 * process from stealing the pages as they are added to the pool but
2388ac09b3a1SAdam Litke 	 * before they are reserved.
2389e4e574b7SAdam Litke 	 */
2390e4e574b7SAdam Litke 	needed += allocated;
2391a5516438SAndi Kleen 	h->resv_huge_pages += delta;
2392e4e574b7SAdam Litke 	ret = 0;
2393a9869b83SNaoya Horiguchi 
239419fc3f0aSAdam Litke 	/* Free the needed pages to the hugetlb pool */
239519fc3f0aSAdam Litke 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
239619fc3f0aSAdam Litke 		if ((--needed) < 0)
239719fc3f0aSAdam Litke 			break;
2398b65a4edaSMike Kravetz 		/* Add the page to the hugetlb allocator */
2399a5516438SAndi Kleen 		enqueue_huge_page(h, page);
240019fc3f0aSAdam Litke 	}
240128073b02SHillf Danton free:
2402db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
240319fc3f0aSAdam Litke 
2404b65a4edaSMike Kravetz 	/*
2405b65a4edaSMike Kravetz 	 * Free unnecessary surplus pages to the buddy allocator.
2406b65a4edaSMike Kravetz 	 * Pages have a zero ref count, so call free_huge_page directly.
2407b65a4edaSMike Kravetz 	 */
2408c0d934baSJoonsoo Kim 	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
2409b65a4edaSMike Kravetz 		free_huge_page(page);
2410db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
2411e4e574b7SAdam Litke 
2412e4e574b7SAdam Litke 	return ret;
2413e4e574b7SAdam Litke }
2414e4e574b7SAdam Litke 
2415e4e574b7SAdam Litke /*
2416e5bbc8a6SMike Kravetz  * This routine has two main purposes:
2417e5bbc8a6SMike Kravetz  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2418e5bbc8a6SMike Kravetz  *    in unused_resv_pages.  This corresponds to the prior adjustments made
2419e5bbc8a6SMike Kravetz  *    to the associated reservation map.
2420e5bbc8a6SMike Kravetz  * 2) Free any unused surplus pages that may have been allocated to satisfy
2421e5bbc8a6SMike Kravetz  *    the reservation.  As many as unused_resv_pages may be freed.
2422e4e574b7SAdam Litke  */
2423a5516438SAndi Kleen static void return_unused_surplus_pages(struct hstate *h,
2424a5516438SAndi Kleen 					unsigned long unused_resv_pages)
2425e4e574b7SAdam Litke {
2426e4e574b7SAdam Litke 	unsigned long nr_pages;
242710c6ec49SMike Kravetz 	struct page *page;
242810c6ec49SMike Kravetz 	LIST_HEAD(page_list);
242910c6ec49SMike Kravetz 
24309487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
243110c6ec49SMike Kravetz 	/* Uncommit the reservation */
243210c6ec49SMike Kravetz 	h->resv_huge_pages -= unused_resv_pages;
2433e4e574b7SAdam Litke 
2434aa888a74SAndi Kleen 	/* Cannot return gigantic pages currently */
2435bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
2436e5bbc8a6SMike Kravetz 		goto out;
2437aa888a74SAndi Kleen 
2438e5bbc8a6SMike Kravetz 	/*
2439e5bbc8a6SMike Kravetz 	 * Part (or even all) of the reservation could have been backed
2440e5bbc8a6SMike Kravetz 	 * by pre-allocated pages. Only free surplus pages.
2441e5bbc8a6SMike Kravetz 	 */
2442a5516438SAndi Kleen 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2443e4e574b7SAdam Litke 
2444685f3457SLee Schermerhorn 	/*
2445685f3457SLee Schermerhorn 	 * We want to release as many surplus pages as possible, spread
24469b5e5d0fSLee Schermerhorn 	 * evenly across all nodes with memory. Iterate across these nodes
24479b5e5d0fSLee Schermerhorn 	 * until we can no longer free unreserved surplus pages. This occurs
24489b5e5d0fSLee Schermerhorn 	 * when the nodes with surplus pages have no free pages.
244910c6ec49SMike Kravetz 	 * remove_pool_huge_page() will balance the freed pages across the
24509b5e5d0fSLee Schermerhorn 	 * on-line nodes with memory and will handle the hstate accounting.
2451685f3457SLee Schermerhorn 	 */
2452685f3457SLee Schermerhorn 	while (nr_pages--) {
245310c6ec49SMike Kravetz 		page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
245410c6ec49SMike Kravetz 		if (!page)
2455e5bbc8a6SMike Kravetz 			goto out;
245610c6ec49SMike Kravetz 
245710c6ec49SMike Kravetz 		list_add(&page->lru, &page_list);
2458e4e574b7SAdam Litke 	}
2459e5bbc8a6SMike Kravetz 
2460e5bbc8a6SMike Kravetz out:
2461db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
246210c6ec49SMike Kravetz 	update_and_free_pages_bulk(h, &page_list);
2463db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
2464e4e574b7SAdam Litke }
2465e4e574b7SAdam Litke 
24665e911373SMike Kravetz 
2467c37f9fb1SAndy Whitcroft /*
2468feba16e2SMike Kravetz  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
24695e911373SMike Kravetz  * are used by the huge page allocation routines to manage reservations.
2470cf3ad20bSMike Kravetz  *
2471cf3ad20bSMike Kravetz  * vma_needs_reservation is called to determine if the huge page at addr
2472cf3ad20bSMike Kravetz  * within the vma has an associated reservation.  If a reservation is
2473cf3ad20bSMike Kravetz  * needed, the value 1 is returned.  The caller is then responsible for
2474cf3ad20bSMike Kravetz  * managing the global reservation and subpool usage counts.  After
2475cf3ad20bSMike Kravetz  * the huge page has been allocated, vma_commit_reservation is called
2476feba16e2SMike Kravetz  * to add the page to the reservation map.  If the page allocation fails,
2477feba16e2SMike Kravetz  * the reservation must be ended instead of committed.  vma_end_reservation
2478feba16e2SMike Kravetz  * is called in such cases.
2479cf3ad20bSMike Kravetz  *
2480cf3ad20bSMike Kravetz  * In the normal case, vma_commit_reservation returns the same value
2481cf3ad20bSMike Kravetz  * as the preceding vma_needs_reservation call.  The only time this
2482cf3ad20bSMike Kravetz  * is not the case is if a reserve map was changed between calls.  It
2483cf3ad20bSMike Kravetz  * is the responsibility of the caller to notice the difference and
2484cf3ad20bSMike Kravetz  * take appropriate action.
248596b96a96SMike Kravetz  *
248696b96a96SMike Kravetz  * vma_add_reservation is used in error paths where a reservation must
248796b96a96SMike Kravetz  * be restored when a newly allocated huge page must be freed.  It is
248896b96a96SMike Kravetz  * to be called after calling vma_needs_reservation to determine if a
248996b96a96SMike Kravetz  * reservation exists.
2490846be085SMike Kravetz  *
2491846be085SMike Kravetz  * vma_del_reservation is used in error paths where an entry in the reserve
2492846be085SMike Kravetz  * map was created during huge page allocation and must be removed.  It is to
2493846be085SMike Kravetz  * be called after calling vma_needs_reservation to determine if a reservation
2494846be085SMike Kravetz  * exists.
2495c37f9fb1SAndy Whitcroft  */
24965e911373SMike Kravetz enum vma_resv_mode {
24975e911373SMike Kravetz 	VMA_NEEDS_RESV,
24985e911373SMike Kravetz 	VMA_COMMIT_RESV,
2499feba16e2SMike Kravetz 	VMA_END_RESV,
250096b96a96SMike Kravetz 	VMA_ADD_RESV,
2501846be085SMike Kravetz 	VMA_DEL_RESV,
25025e911373SMike Kravetz };
2503cf3ad20bSMike Kravetz static long __vma_reservation_common(struct hstate *h,
2504cf3ad20bSMike Kravetz 				struct vm_area_struct *vma, unsigned long addr,
25055e911373SMike Kravetz 				enum vma_resv_mode mode)
2506c37f9fb1SAndy Whitcroft {
25074e35f483SJoonsoo Kim 	struct resv_map *resv;
25084e35f483SJoonsoo Kim 	pgoff_t idx;
2509cf3ad20bSMike Kravetz 	long ret;
25100db9d74eSMina Almasry 	long dummy_out_regions_needed;
2511c37f9fb1SAndy Whitcroft 
25124e35f483SJoonsoo Kim 	resv = vma_resv_map(vma);
25134e35f483SJoonsoo Kim 	if (!resv)
2514c37f9fb1SAndy Whitcroft 		return 1;
2515c37f9fb1SAndy Whitcroft 
25164e35f483SJoonsoo Kim 	idx = vma_hugecache_offset(h, vma, addr);
25175e911373SMike Kravetz 	switch (mode) {
25185e911373SMike Kravetz 	case VMA_NEEDS_RESV:
25190db9d74eSMina Almasry 		ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
25200db9d74eSMina Almasry 		/* We assume that vma_reservation_* routines always operate on
25210db9d74eSMina Almasry 		 * 1 page, and that adding a 1 page entry to the resv map can only
25220db9d74eSMina Almasry 		 * ever require 1 region.
25230db9d74eSMina Almasry 		 */
25240db9d74eSMina Almasry 		VM_BUG_ON(dummy_out_regions_needed != 1);
25255e911373SMike Kravetz 		break;
25265e911373SMike Kravetz 	case VMA_COMMIT_RESV:
2527075a61d0SMina Almasry 		ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
25280db9d74eSMina Almasry 		/* region_add calls of range 1 should never fail. */
25290db9d74eSMina Almasry 		VM_BUG_ON(ret < 0);
25305e911373SMike Kravetz 		break;
2531feba16e2SMike Kravetz 	case VMA_END_RESV:
25320db9d74eSMina Almasry 		region_abort(resv, idx, idx + 1, 1);
25335e911373SMike Kravetz 		ret = 0;
25345e911373SMike Kravetz 		break;
253596b96a96SMike Kravetz 	case VMA_ADD_RESV:
25360db9d74eSMina Almasry 		if (vma->vm_flags & VM_MAYSHARE) {
2537075a61d0SMina Almasry 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
25380db9d74eSMina Almasry 			/* region_add calls of range 1 should never fail. */
25390db9d74eSMina Almasry 			VM_BUG_ON(ret < 0);
25400db9d74eSMina Almasry 		} else {
25410db9d74eSMina Almasry 			region_abort(resv, idx, idx + 1, 1);
254296b96a96SMike Kravetz 			ret = region_del(resv, idx, idx + 1);
254396b96a96SMike Kravetz 		}
254496b96a96SMike Kravetz 		break;
2545846be085SMike Kravetz 	case VMA_DEL_RESV:
2546846be085SMike Kravetz 		if (vma->vm_flags & VM_MAYSHARE) {
2547846be085SMike Kravetz 			region_abort(resv, idx, idx + 1, 1);
2548846be085SMike Kravetz 			ret = region_del(resv, idx, idx + 1);
2549846be085SMike Kravetz 		} else {
2550846be085SMike Kravetz 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2551846be085SMike Kravetz 			/* region_add calls of range 1 should never fail. */
2552846be085SMike Kravetz 			VM_BUG_ON(ret < 0);
2553846be085SMike Kravetz 		}
2554846be085SMike Kravetz 		break;
25555e911373SMike Kravetz 	default:
25565e911373SMike Kravetz 		BUG();
25575e911373SMike Kravetz 	}
255884afd99bSAndy Whitcroft 
2559846be085SMike Kravetz 	if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2560cf3ad20bSMike Kravetz 		return ret;
256167961f9dSMike Kravetz 	/*
2562bf3d12b9SMiaohe Lin 	 * We know a private mapping must have HPAGE_RESV_OWNER set.
2563bf3d12b9SMiaohe Lin 	 *
256467961f9dSMike Kravetz 	 * In most cases, reserves always exist for private mappings.
256567961f9dSMike Kravetz 	 * However, the file associated with the mapping could have been
256667961f9dSMike Kravetz 	 * hole punched or truncated after reserves were consumed, and
256767961f9dSMike Kravetz 	 * a subsequent fault on such a range will not use reserves.
256867961f9dSMike Kravetz 	 * Subtle - The reserve map for private mappings has the
256967961f9dSMike Kravetz 	 * opposite meaning from that of shared mappings.  If NO
257067961f9dSMike Kravetz 	 * entry is in the reserve map, it means a reservation exists.
257167961f9dSMike Kravetz 	 * If an entry exists in the reserve map, it means the
257267961f9dSMike Kravetz 	 * reservation has already been consumed.  As a result, the
257367961f9dSMike Kravetz 	 * return value of this routine is the opposite of the
257467961f9dSMike Kravetz 	 * value returned from reserve map manipulation routines above.
257567961f9dSMike Kravetz 	 */
2576bf3d12b9SMiaohe Lin 	if (ret > 0)
257767961f9dSMike Kravetz 		return 0;
2578bf3d12b9SMiaohe Lin 	if (ret == 0)
257967961f9dSMike Kravetz 		return 1;
2580bf3d12b9SMiaohe Lin 	return ret;
258184afd99bSAndy Whitcroft }
2582cf3ad20bSMike Kravetz 
2583cf3ad20bSMike Kravetz static long vma_needs_reservation(struct hstate *h,
2584a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long addr)
2585c37f9fb1SAndy Whitcroft {
25865e911373SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2587cf3ad20bSMike Kravetz }
2588c37f9fb1SAndy Whitcroft 
2589cf3ad20bSMike Kravetz static long vma_commit_reservation(struct hstate *h,
2590cf3ad20bSMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
2591cf3ad20bSMike Kravetz {
25925e911373SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
25935e911373SMike Kravetz }
25945e911373SMike Kravetz 
2595feba16e2SMike Kravetz static void vma_end_reservation(struct hstate *h,
25965e911373SMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
25975e911373SMike Kravetz {
2598feba16e2SMike Kravetz 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2599c37f9fb1SAndy Whitcroft }
2600c37f9fb1SAndy Whitcroft 
260196b96a96SMike Kravetz static long vma_add_reservation(struct hstate *h,
260296b96a96SMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
260396b96a96SMike Kravetz {
260496b96a96SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
260596b96a96SMike Kravetz }
260696b96a96SMike Kravetz 
2607846be085SMike Kravetz static long vma_del_reservation(struct hstate *h,
2608846be085SMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
260996b96a96SMike Kravetz {
2610846be085SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2611846be085SMike Kravetz }
2612846be085SMike Kravetz 
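/*
 * A minimal sketch of the needs/commit/end sequence described in the
 * comment block above __vma_reservation_common(); see alloc_huge_page()
 * below for the real usage.  reservation_sequence_example() and its
 * alloc_succeeded parameter are hypothetical.
 */
static void reservation_sequence_example(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr,
		bool alloc_succeeded)
{
	long chg = vma_needs_reservation(h, vma, addr);

	if (chg < 0)
		return;		/* rare reserve map allocation failure */

	/* ... the huge page allocation would happen here ... */

	if (alloc_succeeded)
		(void)vma_commit_reservation(h, vma, addr);
	else
		vma_end_reservation(h, vma, addr);
}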
2613846be085SMike Kravetz /*
2614846be085SMike Kravetz  * This routine is called to restore reservation information on error paths.
2615846be085SMike Kravetz  * It should ONLY be called for pages allocated via alloc_huge_page(), and
2616846be085SMike Kravetz  * the hugetlb mutex should remain held when calling this routine.
2617846be085SMike Kravetz  *
2618846be085SMike Kravetz  * It handles two specific cases:
2619846be085SMike Kravetz  * 1) A reservation was in place and the page consumed the reservation.
2620846be085SMike Kravetz  *    HPageRestoreReserve is set in the page.
2621846be085SMike Kravetz  * 2) No reservation was in place for the page, so HPageRestoreReserve is
2622846be085SMike Kravetz  *    not set.  However, alloc_huge_page always updates the reserve map.
2623846be085SMike Kravetz  *
2624846be085SMike Kravetz  * In case 1, free_huge_page later in the error path will increment the
2625846be085SMike Kravetz  * global reserve count.  But, free_huge_page does not have enough context
2626846be085SMike Kravetz  * to adjust the reservation map.  This case deals primarily with private
2627846be085SMike Kravetz  * mappings.  Adjust the reserve map here to be consistent with global
2628846be085SMike Kravetz  * reserve count adjustments to be made by free_huge_page.  Make sure the
2629846be085SMike Kravetz  * reserve map indicates there is a reservation present.
2630846be085SMike Kravetz  *
2631846be085SMike Kravetz  * In case 2, simply undo reserve map modifications done by alloc_huge_page.
2632846be085SMike Kravetz  */
2633846be085SMike Kravetz void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2634846be085SMike Kravetz 			unsigned long address, struct page *page)
2635846be085SMike Kravetz {
263696b96a96SMike Kravetz 	long rc = vma_needs_reservation(h, vma, address);
263796b96a96SMike Kravetz 
2638846be085SMike Kravetz 	if (HPageRestoreReserve(page)) {
2639846be085SMike Kravetz 		if (unlikely(rc < 0))
264096b96a96SMike Kravetz 			/*
264196b96a96SMike Kravetz 			 * Rare out of memory condition in reserve map
2642d6995da3SMike Kravetz 			 * manipulation.  Clear HPageRestoreReserve so that
264396b96a96SMike Kravetz 			 * global reserve count will not be incremented
264496b96a96SMike Kravetz 			 * by free_huge_page.  This will make it appear
264596b96a96SMike Kravetz 			 * as though the reservation for this page was
264696b96a96SMike Kravetz 			 * consumed.  This may prevent the task from
264796b96a96SMike Kravetz 			 * faulting in the page at a later time.  This
264896b96a96SMike Kravetz 			 * is better than inconsistent global huge page
264996b96a96SMike Kravetz 			 * accounting of reserve counts.
265096b96a96SMike Kravetz 			 */
2651d6995da3SMike Kravetz 			ClearHPageRestoreReserve(page);
2652846be085SMike Kravetz 		else if (rc)
2653846be085SMike Kravetz 			(void)vma_add_reservation(h, vma, address);
2654846be085SMike Kravetz 		else
2655846be085SMike Kravetz 			vma_end_reservation(h, vma, address);
2656846be085SMike Kravetz 	} else {
2657846be085SMike Kravetz 		if (!rc) {
265896b96a96SMike Kravetz 			/*
2659846be085SMike Kravetz 			 * This indicates there is an entry in the reserve map
2660c7b1850dSMike Kravetz 			 * not added by alloc_huge_page.  We know it was added
2661846be085SMike Kravetz 			 * before the alloc_huge_page call, otherwise
2662846be085SMike Kravetz 			 * HPageRestoreReserve would be set on the page.
2663846be085SMike Kravetz 			 * Remove the entry so that a subsequent allocation
2664846be085SMike Kravetz 			 * does not consume a reservation.
266596b96a96SMike Kravetz 			 */
2666846be085SMike Kravetz 			rc = vma_del_reservation(h, vma, address);
2667846be085SMike Kravetz 			if (rc < 0)
2668846be085SMike Kravetz 				/*
2669846be085SMike Kravetz 				 * VERY rare out of memory condition.  Since
2670846be085SMike Kravetz 				 * we can not delete the entry, set
2671846be085SMike Kravetz 				 * HPageRestoreReserve so that the reserve
2672846be085SMike Kravetz 				 * count will be incremented when the page
2673846be085SMike Kravetz 				 * is freed.  This reserve will be consumed
2674846be085SMike Kravetz 				 * on a subsequent allocation.
2675846be085SMike Kravetz 				 */
2676846be085SMike Kravetz 				SetHPageRestoreReserve(page);
2677846be085SMike Kravetz 		} else if (rc < 0) {
2678846be085SMike Kravetz 			/*
2679846be085SMike Kravetz 			 * Rare out of memory condition from
2680846be085SMike Kravetz 			 * vma_needs_reservation call.  Memory allocation is
2681846be085SMike Kravetz 			 * only attempted if a new entry is needed.  Therefore,
2682846be085SMike Kravetz 			 * this implies there is not an entry in the
2683846be085SMike Kravetz 			 * reserve map.
2684846be085SMike Kravetz 			 *
2685846be085SMike Kravetz 			 * For shared mappings, no entry in the map indicates
2686846be085SMike Kravetz 			 * no reservation.  We are done.
2687846be085SMike Kravetz 			 */
2688846be085SMike Kravetz 			if (!(vma->vm_flags & VM_MAYSHARE))
2689846be085SMike Kravetz 				/*
2690846be085SMike Kravetz 				 * For private mappings, no entry indicates
2691846be085SMike Kravetz 				 * a reservation is present.  Since we can
2692846be085SMike Kravetz 				 * not add an entry, set SetHPageRestoreReserve
2693846be085SMike Kravetz 				 * on the page so reserve count will be
2694846be085SMike Kravetz 				 * incremented when freed.  This reserve will
2695846be085SMike Kravetz 				 * be consumed on a subsequent allocation.
2696846be085SMike Kravetz 				 */
2697846be085SMike Kravetz 				SetHPageRestoreReserve(page);
269896b96a96SMike Kravetz 		} else
2699846be085SMike Kravetz 			/*
2700846be085SMike Kravetz 			 * No reservation present, do nothing
2701846be085SMike Kravetz 			 */
270296b96a96SMike Kravetz 			 vma_end_reservation(h, vma, address);
270396b96a96SMike Kravetz 	}
270496b96a96SMike Kravetz }
270596b96a96SMike Kravetz 
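/*
 * A minimal error-path sketch, assuming a caller that obtained a page
 * from alloc_huge_page() and then failed before installing it;
 * fault_error_path_example() and its parameters are hypothetical.
 */
static void fault_error_path_example(struct hstate *h,
		struct vm_area_struct *vma, unsigned long address,
		struct page *page)
{
	/* Keep the reserve map consistent with the upcoming free_huge_page(). */
	restore_reserve_on_error(h, vma, address, page);
	put_page(page);
}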
2706369fa227SOscar Salvador /*
2707369fa227SOscar Salvador  * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one
2708369fa227SOscar Salvador  * @h: struct hstate old page belongs to
2709369fa227SOscar Salvador  * @old_page: Old page to dissolve
2710ae37c7ffSOscar Salvador  * @list: List to isolate the page in case we need to
2711369fa227SOscar Salvador  * Returns 0 on success, otherwise negated error.
2712369fa227SOscar Salvador  */
2713ae37c7ffSOscar Salvador static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
2714ae37c7ffSOscar Salvador 					struct list_head *list)
2715369fa227SOscar Salvador {
2716369fa227SOscar Salvador 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2717369fa227SOscar Salvador 	int nid = page_to_nid(old_page);
2718b65a4edaSMike Kravetz 	bool alloc_retry = false;
2719369fa227SOscar Salvador 	struct page *new_page;
2720369fa227SOscar Salvador 	int ret = 0;
2721369fa227SOscar Salvador 
2722369fa227SOscar Salvador 	/*
2723369fa227SOscar Salvador 	 * Before dissolving the page, we need to allocate a new one for the
2724f41f2ed4SMuchun Song 	 * pool to remain stable.  Here, we allocate the page and 'prep' it
2725f41f2ed4SMuchun Song 	 * by doing everything but actually updating counters and adding to
2726f41f2ed4SMuchun Song 	 * the pool.  This simplifies things and lets us do most of the processing
2727f41f2ed4SMuchun Song 	 * under the lock.
2728369fa227SOscar Salvador 	 */
2729b65a4edaSMike Kravetz alloc_retry:
2730369fa227SOscar Salvador 	new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
2731369fa227SOscar Salvador 	if (!new_page)
2732369fa227SOscar Salvador 		return -ENOMEM;
2733b65a4edaSMike Kravetz 	/*
2734b65a4edaSMike Kravetz 	 * If all goes well, this page will be directly added to the free
2735b65a4edaSMike Kravetz 	 * list in the pool.  For this the ref count needs to be zero.
2736b65a4edaSMike Kravetz 	 * Attempt to drop now, and retry once if needed.  It is VERY
2737b65a4edaSMike Kravetz 	 * unlikely there is another ref on the page.
2738b65a4edaSMike Kravetz 	 *
2739b65a4edaSMike Kravetz 	 * If someone else has a reference to the page, it will be freed
2740b65a4edaSMike Kravetz 	 * when they drop their ref.  Abuse temporary page flag to accomplish
2741b65a4edaSMike Kravetz 	 * this.  Retry once if there is an inflated ref count.
2742b65a4edaSMike Kravetz 	 */
2743b65a4edaSMike Kravetz 	SetHPageTemporary(new_page);
2744b65a4edaSMike Kravetz 	if (!put_page_testzero(new_page)) {
2745b65a4edaSMike Kravetz 		if (alloc_retry)
2746b65a4edaSMike Kravetz 			return -EBUSY;
2747b65a4edaSMike Kravetz 
2748b65a4edaSMike Kravetz 		alloc_retry = true;
2749b65a4edaSMike Kravetz 		goto alloc_retry;
2750b65a4edaSMike Kravetz 	}
2751b65a4edaSMike Kravetz 	ClearHPageTemporary(new_page);
2752b65a4edaSMike Kravetz 
2753f41f2ed4SMuchun Song 	__prep_new_huge_page(h, new_page);
2754369fa227SOscar Salvador 
2755369fa227SOscar Salvador retry:
2756369fa227SOscar Salvador 	spin_lock_irq(&hugetlb_lock);
2757369fa227SOscar Salvador 	if (!PageHuge(old_page)) {
2758369fa227SOscar Salvador 		/*
2759369fa227SOscar Salvador 		 * Freed from under us. Drop new_page too.
2760369fa227SOscar Salvador 		 */
2761369fa227SOscar Salvador 		goto free_new;
2762369fa227SOscar Salvador 	} else if (page_count(old_page)) {
2763369fa227SOscar Salvador 		/*
2764ae37c7ffSOscar Salvador 		 * Someone has grabbed the page, try to isolate it here.
2765ae37c7ffSOscar Salvador 		 * Fail with -EBUSY if not possible.
2766369fa227SOscar Salvador 		 */
2767ae37c7ffSOscar Salvador 		spin_unlock_irq(&hugetlb_lock);
2768ae37c7ffSOscar Salvador 		if (!isolate_huge_page(old_page, list))
2769369fa227SOscar Salvador 			ret = -EBUSY;
2770ae37c7ffSOscar Salvador 		spin_lock_irq(&hugetlb_lock);
2771369fa227SOscar Salvador 		goto free_new;
2772369fa227SOscar Salvador 	} else if (!HPageFreed(old_page)) {
2773369fa227SOscar Salvador 		/*
2774369fa227SOscar Salvador 		 * Page's refcount is 0 but it has not been enqueued in the
2775369fa227SOscar Salvador 		 * freelist yet. Race window is small, so we can succeed here if
2776369fa227SOscar Salvador 		 * we retry.
2777369fa227SOscar Salvador 		 */
2778369fa227SOscar Salvador 		spin_unlock_irq(&hugetlb_lock);
2779369fa227SOscar Salvador 		cond_resched();
2780369fa227SOscar Salvador 		goto retry;
2781369fa227SOscar Salvador 	} else {
2782369fa227SOscar Salvador 		/*
2783369fa227SOscar Salvador 		 * Ok, old_page is still a genuine free hugepage. Remove it from
2784369fa227SOscar Salvador 		 * the freelist and decrease the counters. These will be
2785369fa227SOscar Salvador 		 * incremented again when calling __prep_account_new_huge_page()
2786369fa227SOscar Salvador 		 * and enqueue_huge_page() for new_page. The counters will remain
2787369fa227SOscar Salvador 		 * stable since this happens under the lock.
2788369fa227SOscar Salvador 		 */
2789369fa227SOscar Salvador 		remove_hugetlb_page(h, old_page, false);
2790369fa227SOscar Salvador 
2791369fa227SOscar Salvador 		/*
2792b65a4edaSMike Kravetz 		 * Ref count on new page is already zero as it was dropped
2793b65a4edaSMike Kravetz 		 * earlier.  It can be directly added to the pool free list.
2794369fa227SOscar Salvador 		 */
2795369fa227SOscar Salvador 		__prep_account_new_huge_page(h, nid);
2796369fa227SOscar Salvador 		enqueue_huge_page(h, new_page);
2797369fa227SOscar Salvador 
2798369fa227SOscar Salvador 		/*
2799369fa227SOscar Salvador 		 * Pages have been replaced, we can safely free the old one.
2800369fa227SOscar Salvador 		 */
2801369fa227SOscar Salvador 		spin_unlock_irq(&hugetlb_lock);
2802b65d4adbSMuchun Song 		update_and_free_page(h, old_page, false);
2803369fa227SOscar Salvador 	}
2804369fa227SOscar Salvador 
2805369fa227SOscar Salvador 	return ret;
2806369fa227SOscar Salvador 
2807369fa227SOscar Salvador free_new:
2808369fa227SOscar Salvador 	spin_unlock_irq(&hugetlb_lock);
2809b65a4edaSMike Kravetz 	/* Page has a zero ref count, but needs a ref to be freed */
2810b65a4edaSMike Kravetz 	set_page_refcounted(new_page);
2811b65d4adbSMuchun Song 	update_and_free_page(h, new_page, false);
2812369fa227SOscar Salvador 
2813369fa227SOscar Salvador 	return ret;
2814369fa227SOscar Salvador }
2815369fa227SOscar Salvador 
2816ae37c7ffSOscar Salvador int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
2817369fa227SOscar Salvador {
2818369fa227SOscar Salvador 	struct hstate *h;
2819369fa227SOscar Salvador 	struct page *head;
2820ae37c7ffSOscar Salvador 	int ret = -EBUSY;
2821369fa227SOscar Salvador 
2822369fa227SOscar Salvador 	/*
2823369fa227SOscar Salvador 	 * The page might have been dissolved from under our feet, so make sure
2824369fa227SOscar Salvador 	 * to carefully check the state under the lock.
2825369fa227SOscar Salvador 	 * Return success when racing as if we dissolved the page ourselves.
2826369fa227SOscar Salvador 	 */
2827369fa227SOscar Salvador 	spin_lock_irq(&hugetlb_lock);
2828369fa227SOscar Salvador 	if (PageHuge(page)) {
2829369fa227SOscar Salvador 		head = compound_head(page);
2830369fa227SOscar Salvador 		h = page_hstate(head);
2831369fa227SOscar Salvador 	} else {
2832369fa227SOscar Salvador 		spin_unlock_irq(&hugetlb_lock);
2833369fa227SOscar Salvador 		return 0;
2834369fa227SOscar Salvador 	}
2835369fa227SOscar Salvador 	spin_unlock_irq(&hugetlb_lock);
2836369fa227SOscar Salvador 
2837369fa227SOscar Salvador 	/*
2838369fa227SOscar Salvador 	 * Fence off gigantic pages as there is a cyclic dependency between
2839369fa227SOscar Salvador 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
2840369fa227SOscar Salvador 	 * of bailing out right away without further retrying.
2841369fa227SOscar Salvador 	 */
2842369fa227SOscar Salvador 	if (hstate_is_gigantic(h))
2843369fa227SOscar Salvador 		return -ENOMEM;
2844369fa227SOscar Salvador 
2845ae37c7ffSOscar Salvador 	if (page_count(head) && isolate_huge_page(head, list))
2846ae37c7ffSOscar Salvador 		ret = 0;
2847ae37c7ffSOscar Salvador 	else if (!page_count(head))
2848ae37c7ffSOscar Salvador 		ret = alloc_and_dissolve_huge_page(h, head, list);
2849ae37c7ffSOscar Salvador 
2850ae37c7ffSOscar Salvador 	return ret;
2851369fa227SOscar Salvador }
2852369fa227SOscar Salvador 
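/*
 * A minimal sketch of the alloc_contig_range style usage implied above:
 * each huge page found in the target range is either isolated for
 * migration or replaced and dissolved if free.  The
 * handle_range_hugepage_example() wrapper and @movable list are hypothetical.
 */
static int handle_range_hugepage_example(struct page *page,
		struct list_head *movable)
{
	int ret = isolate_or_dissolve_huge_page(page, movable);

	if (ret == -ENOMEM)
		return ret;	/* gigantic page or allocation failure: bail out */
	return ret;		/* 0 on success, -EBUSY if the caller should retry */
}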
285370c3547eSMike Kravetz struct page *alloc_huge_page(struct vm_area_struct *vma,
285404f2cbe3SMel Gorman 				    unsigned long addr, int avoid_reserve)
2855348ea204SAdam Litke {
285690481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_vma(vma);
2857a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
2858348ea204SAdam Litke 	struct page *page;
2859d85f69b0SMike Kravetz 	long map_chg, map_commit;
2860d85f69b0SMike Kravetz 	long gbl_chg;
28616d76dcf4SAneesh Kumar K.V 	int ret, idx;
28626d76dcf4SAneesh Kumar K.V 	struct hugetlb_cgroup *h_cg;
286308cf9fafSMina Almasry 	bool deferred_reserve;
28642fc39cecSAdam Litke 
28656d76dcf4SAneesh Kumar K.V 	idx = hstate_index(h);
2866a1e78772SMel Gorman 	/*
2867d85f69b0SMike Kravetz 	 * Examine the region/reserve map to determine if the process
2868d85f69b0SMike Kravetz 	 * has a reservation for the page to be allocated.  A return
2869d85f69b0SMike Kravetz 	 * code of zero indicates a reservation exists (no change).
2870a1e78772SMel Gorman 	 */
2871d85f69b0SMike Kravetz 	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2872d85f69b0SMike Kravetz 	if (map_chg < 0)
287376dcee75SAneesh Kumar K.V 		return ERR_PTR(-ENOMEM);
2874d85f69b0SMike Kravetz 
2875d85f69b0SMike Kravetz 	/*
2876d85f69b0SMike Kravetz 	 * Processes that did not create the mapping will have no
2877d85f69b0SMike Kravetz 	 * reserves as indicated by the region/reserve map. Check
2878d85f69b0SMike Kravetz 	 * that the allocation will not exceed the subpool limit.
2879d85f69b0SMike Kravetz 	 * Allocations for MAP_NORESERVE mappings also need to be
2880d85f69b0SMike Kravetz 	 * checked against any subpool limit.
2881d85f69b0SMike Kravetz 	 */
2882d85f69b0SMike Kravetz 	if (map_chg || avoid_reserve) {
2883d85f69b0SMike Kravetz 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
2884d85f69b0SMike Kravetz 		if (gbl_chg < 0) {
2885feba16e2SMike Kravetz 			vma_end_reservation(h, vma, addr);
288676dcee75SAneesh Kumar K.V 			return ERR_PTR(-ENOSPC);
28875e911373SMike Kravetz 		}
288890d8b7e6SAdam Litke 
2889d85f69b0SMike Kravetz 		/*
2890d85f69b0SMike Kravetz 		 * Even though there was no reservation in the region/reserve
2891d85f69b0SMike Kravetz 		 * map, there could be reservations associated with the
2892d85f69b0SMike Kravetz 		 * subpool that can be used.  This would be indicated if the
2893d85f69b0SMike Kravetz 		 * return value of hugepage_subpool_get_pages() is zero.
2894d85f69b0SMike Kravetz 		 * However, if avoid_reserve is specified we still avoid even
2895d85f69b0SMike Kravetz 		 * the subpool reservations.
2896d85f69b0SMike Kravetz 		 */
2897d85f69b0SMike Kravetz 		if (avoid_reserve)
2898d85f69b0SMike Kravetz 			gbl_chg = 1;
2899d85f69b0SMike Kravetz 	}
2900d85f69b0SMike Kravetz 
290108cf9fafSMina Almasry 	/* If this allocation is not consuming a reservation, charge it now.
290208cf9fafSMina Almasry 	 */
29036501fe5fSMiaohe Lin 	deferred_reserve = map_chg || avoid_reserve;
290408cf9fafSMina Almasry 	if (deferred_reserve) {
290508cf9fafSMina Almasry 		ret = hugetlb_cgroup_charge_cgroup_rsvd(
290608cf9fafSMina Almasry 			idx, pages_per_huge_page(h), &h_cg);
29078f34af6fSJianyu Zhan 		if (ret)
29088f34af6fSJianyu Zhan 			goto out_subpool_put;
290908cf9fafSMina Almasry 	}
291008cf9fafSMina Almasry 
291108cf9fafSMina Almasry 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
291208cf9fafSMina Almasry 	if (ret)
291308cf9fafSMina Almasry 		goto out_uncharge_cgroup_reservation;
29148f34af6fSJianyu Zhan 
2915db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
2916d85f69b0SMike Kravetz 	/*
2917d85f69b0SMike Kravetz 	 * gbl_chg is passed to indicate whether or not a page must be taken
2918d85f69b0SMike Kravetz 	 * from the global free pool (global change).  gbl_chg == 0 indicates
2919d85f69b0SMike Kravetz 	 * a reservation exists for the allocation.
2920d85f69b0SMike Kravetz 	 */
2921d85f69b0SMike Kravetz 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
292281a6fcaeSJoonsoo Kim 	if (!page) {
2923db71ef79SMike Kravetz 		spin_unlock_irq(&hugetlb_lock);
29240c397daeSMichal Hocko 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
29258f34af6fSJianyu Zhan 		if (!page)
29268f34af6fSJianyu Zhan 			goto out_uncharge_cgroup;
2927a88c7695SNaoya Horiguchi 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2928d6995da3SMike Kravetz 			SetHPageRestoreReserve(page);
2929a88c7695SNaoya Horiguchi 			h->resv_huge_pages--;
2930a88c7695SNaoya Horiguchi 		}
2931db71ef79SMike Kravetz 		spin_lock_irq(&hugetlb_lock);
293215a8d68eSWei Yang 		list_add(&page->lru, &h->hugepage_activelist);
293381a6fcaeSJoonsoo Kim 		/* Fall through */
2934a1e78772SMel Gorman 	}
293581a6fcaeSJoonsoo Kim 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
293608cf9fafSMina Almasry 	/* If allocation is not consuming a reservation, also store the
293708cf9fafSMina Almasry 	 * hugetlb_cgroup pointer on the page.
293808cf9fafSMina Almasry 	 */
293908cf9fafSMina Almasry 	if (deferred_reserve) {
294008cf9fafSMina Almasry 		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
294108cf9fafSMina Almasry 						  h_cg, page);
294208cf9fafSMina Almasry 	}
294308cf9fafSMina Almasry 
2944db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
2945a1e78772SMel Gorman 
2946d6995da3SMike Kravetz 	hugetlb_set_page_subpool(page, spool);
2947a1e78772SMel Gorman 
2948d85f69b0SMike Kravetz 	map_commit = vma_commit_reservation(h, vma, addr);
2949d85f69b0SMike Kravetz 	if (unlikely(map_chg > map_commit)) {
295033039678SMike Kravetz 		/*
295133039678SMike Kravetz 		 * The page was added to the reservation map between
295233039678SMike Kravetz 		 * vma_needs_reservation and vma_commit_reservation.
295333039678SMike Kravetz 		 * This indicates a race with hugetlb_reserve_pages.
295433039678SMike Kravetz 		 * Adjust for the subpool count incremented above AND
295533039678SMike Kravetz 		 * in hugetlb_reserve_pages for the same page.  Also,
295633039678SMike Kravetz 		 * the reservation count added in hugetlb_reserve_pages
295733039678SMike Kravetz 		 * no longer applies.
295833039678SMike Kravetz 		 */
295933039678SMike Kravetz 		long rsv_adjust;
296033039678SMike Kravetz 
296133039678SMike Kravetz 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
296233039678SMike Kravetz 		hugetlb_acct_memory(h, -rsv_adjust);
296379aa925bSMike Kravetz 		if (deferred_reserve)
296479aa925bSMike Kravetz 			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
296579aa925bSMike Kravetz 					pages_per_huge_page(h), page);
296633039678SMike Kravetz 	}
29677893d1d5SAdam Litke 	return page;
29688f34af6fSJianyu Zhan 
29698f34af6fSJianyu Zhan out_uncharge_cgroup:
29708f34af6fSJianyu Zhan 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
297108cf9fafSMina Almasry out_uncharge_cgroup_reservation:
297208cf9fafSMina Almasry 	if (deferred_reserve)
297308cf9fafSMina Almasry 		hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
297408cf9fafSMina Almasry 						    h_cg);
29758f34af6fSJianyu Zhan out_subpool_put:
2976d85f69b0SMike Kravetz 	if (map_chg || avoid_reserve)
29778f34af6fSJianyu Zhan 		hugepage_subpool_put_pages(spool, 1);
2978feba16e2SMike Kravetz 	vma_end_reservation(h, vma, addr);
29798f34af6fSJianyu Zhan 	return ERR_PTR(-ENOSPC);
2980b45b5bd6SDavid Gibson }
2981b45b5bd6SDavid Gibson 
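/*
 * A minimal fault-style caller sketch for alloc_huge_page(); the
 * hugetlb_fault_example() wrapper and its error handling are
 * hypothetical, shown only to illustrate the ERR_PTR convention.
 */
static vm_fault_t hugetlb_fault_example(struct vm_area_struct *vma,
		unsigned long address)
{
	struct page *page;

	page = alloc_huge_page(vma, address, 0);
	if (IS_ERR(page))
		return VM_FAULT_SIGBUS;	/* -ENOMEM or -ENOSPC from pool/subpool */

	/*
	 * ... map the page; on a later failure call restore_reserve_on_error()
	 * and put_page() before returning an error ...
	 */
	return 0;
}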
2982b5389086SZhenguo Yao int alloc_bootmem_huge_page(struct hstate *h, int nid)
2983e24a1307SAneesh Kumar K.V 	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2984b5389086SZhenguo Yao int __alloc_bootmem_huge_page(struct hstate *h, int nid)
2985aa888a74SAndi Kleen {
2986b5389086SZhenguo Yao 	struct huge_bootmem_page *m = NULL; /* initialize for clang */
2987b2261026SJoonsoo Kim 	int nr_nodes, node;
2988aa888a74SAndi Kleen 
2989b5389086SZhenguo Yao 	/* do node specific alloc */
2990b5389086SZhenguo Yao 	if (nid != NUMA_NO_NODE) {
2991b5389086SZhenguo Yao 		m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
2992b5389086SZhenguo Yao 				0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
2993b5389086SZhenguo Yao 		if (!m)
2994b5389086SZhenguo Yao 			return 0;
2995b5389086SZhenguo Yao 		goto found;
2996b5389086SZhenguo Yao 	}
2997b5389086SZhenguo Yao 	/* allocate from next node when distributing huge pages */
2998b2261026SJoonsoo Kim 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2999b5389086SZhenguo Yao 		m = memblock_alloc_try_nid_raw(
30008b89a116SGrygorii Strashko 				huge_page_size(h), huge_page_size(h),
300197ad1087SMike Rapoport 				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
3002aa888a74SAndi Kleen 		/*
3003aa888a74SAndi Kleen 		 * Use the beginning of the huge page to store the
3004aa888a74SAndi Kleen 		 * huge_bootmem_page struct (until gather_bootmem
3005aa888a74SAndi Kleen 		 * puts them into the mem_map).
3006aa888a74SAndi Kleen 		 */
3007b5389086SZhenguo Yao 		if (!m)
3008b5389086SZhenguo Yao 			return 0;
3009aa888a74SAndi Kleen 		goto found;
3010aa888a74SAndi Kleen 	}
3011aa888a74SAndi Kleen 
3012aa888a74SAndi Kleen found:
3013aa888a74SAndi Kleen 	/* Put them into a private list first because mem_map is not up yet */
3014330d6e48SCannon Matthews 	INIT_LIST_HEAD(&m->list);
3015aa888a74SAndi Kleen 	list_add(&m->list, &huge_boot_pages);
3016aa888a74SAndi Kleen 	m->hstate = h;
3017aa888a74SAndi Kleen 	return 1;
3018aa888a74SAndi Kleen }
3019aa888a74SAndi Kleen 
302048b8d744SMike Kravetz /*
302148b8d744SMike Kravetz  * Put bootmem huge pages into the standard lists after mem_map is up.
302248b8d744SMike Kravetz  * Note: This only applies to gigantic (order > MAX_ORDER) pages.
302348b8d744SMike Kravetz  */
3024aa888a74SAndi Kleen static void __init gather_bootmem_prealloc(void)
3025aa888a74SAndi Kleen {
3026aa888a74SAndi Kleen 	struct huge_bootmem_page *m;
3027aa888a74SAndi Kleen 
3028aa888a74SAndi Kleen 	list_for_each_entry(m, &huge_boot_pages, list) {
302940d18ebfSMike Kravetz 		struct page *page = virt_to_page(m);
3030aa888a74SAndi Kleen 		struct hstate *h = m->hstate;
3031ee8f248dSBecky Bruce 
303248b8d744SMike Kravetz 		VM_BUG_ON(!hstate_is_gigantic(h));
3033aa888a74SAndi Kleen 		WARN_ON(page_count(page) != 1);
30347118fc29SMike Kravetz 		if (prep_compound_gigantic_page(page, huge_page_order(h))) {
3035ef5a22beSAndrea Arcangeli 			WARN_ON(PageReserved(page));
3036aa888a74SAndi Kleen 			prep_new_huge_page(h, page, page_to_nid(page));
30377118fc29SMike Kravetz 			put_page(page); /* add to the hugepage allocator */
30387118fc29SMike Kravetz 		} else {
3039416d85edSMike Kravetz 			/* VERY unlikely inflated ref count on a tail page */
30407118fc29SMike Kravetz 			free_gigantic_page(page, huge_page_order(h));
30417118fc29SMike Kravetz 		}
3042af0fb9dfSMichal Hocko 
3043b0320c7bSRafael Aquini 		/*
304448b8d744SMike Kravetz 		 * We need to restore the 'stolen' pages to totalram_pages
304548b8d744SMike Kravetz 		 * in order to fix confusing memory reports from free(1) and
304648b8d744SMike Kravetz 		 * other side-effects, like CommitLimit going negative.
3047b0320c7bSRafael Aquini 		 */
3048c78a7f36SMiaohe Lin 		adjust_managed_page_count(page, pages_per_huge_page(h));
3049520495feSCannon Matthews 		cond_resched();
3050aa888a74SAndi Kleen 	}
3051aa888a74SAndi Kleen }
3052b5389086SZhenguo Yao static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3053b5389086SZhenguo Yao {
3054b5389086SZhenguo Yao 	unsigned long i;
3055b5389086SZhenguo Yao 	char buf[32];
3056b5389086SZhenguo Yao 
3057b5389086SZhenguo Yao 	for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3058b5389086SZhenguo Yao 		if (hstate_is_gigantic(h)) {
3059b5389086SZhenguo Yao 			if (!alloc_bootmem_huge_page(h, nid))
3060b5389086SZhenguo Yao 				break;
3061b5389086SZhenguo Yao 		} else {
3062b5389086SZhenguo Yao 			struct page *page;
3063b5389086SZhenguo Yao 			gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3064b5389086SZhenguo Yao 
3065b5389086SZhenguo Yao 			page = alloc_fresh_huge_page(h, gfp_mask, nid,
3066b5389086SZhenguo Yao 					&node_states[N_MEMORY], NULL);
3067b5389086SZhenguo Yao 			if (!page)
3068b5389086SZhenguo Yao 				break;
3069b5389086SZhenguo Yao 			put_page(page); /* free it into the hugepage allocator */
3070b5389086SZhenguo Yao 		}
3071b5389086SZhenguo Yao 		cond_resched();
3072b5389086SZhenguo Yao 	}
3073b5389086SZhenguo Yao 	if (i == h->max_huge_pages_node[nid])
3074b5389086SZhenguo Yao 		return;
3075b5389086SZhenguo Yao 
3076b5389086SZhenguo Yao 	string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3077b5389086SZhenguo Yao 	pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
3078b5389086SZhenguo Yao 		h->max_huge_pages_node[nid], buf, nid, i);
3079b5389086SZhenguo Yao 	h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3080b5389086SZhenguo Yao 	h->max_huge_pages_node[nid] = i;
3081b5389086SZhenguo Yao }
3082aa888a74SAndi Kleen 
30838faa8b07SAndi Kleen static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
30841da177e4SLinus Torvalds {
30851da177e4SLinus Torvalds 	unsigned long i;
3086f60858f9SMike Kravetz 	nodemask_t *node_alloc_noretry;
3087b5389086SZhenguo Yao 	bool node_specific_alloc = false;
3088f60858f9SMike Kravetz 
3089b5389086SZhenguo Yao 	/* skip gigantic hugepages allocation if hugetlb_cma enabled */
3090b5389086SZhenguo Yao 	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
3091b5389086SZhenguo Yao 		pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3092b5389086SZhenguo Yao 		return;
3093b5389086SZhenguo Yao 	}
3094b5389086SZhenguo Yao 
3095b5389086SZhenguo Yao 	/* do node specific alloc */
30960a7a0f6fSPeng Liu 	for_each_online_node(i) {
3097b5389086SZhenguo Yao 		if (h->max_huge_pages_node[i] > 0) {
3098b5389086SZhenguo Yao 			hugetlb_hstate_alloc_pages_onenode(h, i);
3099b5389086SZhenguo Yao 			node_specific_alloc = true;
3100b5389086SZhenguo Yao 		}
3101b5389086SZhenguo Yao 	}
3102b5389086SZhenguo Yao 
3103b5389086SZhenguo Yao 	if (node_specific_alloc)
3104b5389086SZhenguo Yao 		return;
3105b5389086SZhenguo Yao 
3106b5389086SZhenguo Yao 	/* below will do all node balanced alloc */
3107f60858f9SMike Kravetz 	if (!hstate_is_gigantic(h)) {
3108f60858f9SMike Kravetz 		/*
3109f60858f9SMike Kravetz 		 * Bit mask controlling how hard we retry per-node allocations.
3110f60858f9SMike Kravetz 		 * Ignore errors as lower level routines can deal with
3111f60858f9SMike Kravetz 		 * node_alloc_noretry == NULL.  If this kmalloc fails at boot
3112f60858f9SMike Kravetz 		 * time, we are likely in bigger trouble.
3113f60858f9SMike Kravetz 		 */
3114f60858f9SMike Kravetz 		node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
3115f60858f9SMike Kravetz 						GFP_KERNEL);
3116f60858f9SMike Kravetz 	} else {
3117f60858f9SMike Kravetz 		/* allocations done at boot time */
3118f60858f9SMike Kravetz 		node_alloc_noretry = NULL;
3119f60858f9SMike Kravetz 	}
3120f60858f9SMike Kravetz 
3121f60858f9SMike Kravetz 	/* bit mask controlling how hard we retry per-node allocations */
3122f60858f9SMike Kravetz 	if (node_alloc_noretry)
3123f60858f9SMike Kravetz 		nodes_clear(*node_alloc_noretry);
31241da177e4SLinus Torvalds 
3125e5ff2159SAndi Kleen 	for (i = 0; i < h->max_huge_pages; ++i) {
3126bae7f4aeSLuiz Capitulino 		if (hstate_is_gigantic(h)) {
3127b5389086SZhenguo Yao 			if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3128aa888a74SAndi Kleen 				break;
31290c397daeSMichal Hocko 		} else if (!alloc_pool_huge_page(h,
3130f60858f9SMike Kravetz 					 &node_states[N_MEMORY],
3131f60858f9SMike Kravetz 					 node_alloc_noretry))
31321da177e4SLinus Torvalds 			break;
313369ed779aSDavid Rientjes 		cond_resched();
31341da177e4SLinus Torvalds 	}
3135d715cf80SLiam R. Howlett 	if (i < h->max_huge_pages) {
3136d715cf80SLiam R. Howlett 		char buf[32];
3137d715cf80SLiam R. Howlett 
3138c6247f72SMatthew Wilcox 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3139d715cf80SLiam R. Howlett 		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
3140d715cf80SLiam R. Howlett 			h->max_huge_pages, buf, i);
31418faa8b07SAndi Kleen 		h->max_huge_pages = i;
3142e5ff2159SAndi Kleen 	}
3143f60858f9SMike Kravetz 	kfree(node_alloc_noretry);
3144d715cf80SLiam R. Howlett }
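/*
 * Caller note (summarizing the call sites visible in this file):
 * non-gigantic sizes reach this from hugetlb_init_hstates() during
 * hugetlb_init(), while gigantic sizes are handled directly from
 * hugepages_setup() at command-line parse time so that the boot-time
 * (memblock) allocator is still usable.
 */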
3145e5ff2159SAndi Kleen 
3146e5ff2159SAndi Kleen static void __init hugetlb_init_hstates(void)
3147e5ff2159SAndi Kleen {
314879dfc695SMike Kravetz 	struct hstate *h, *h2;
3149e5ff2159SAndi Kleen 
3150e5ff2159SAndi Kleen 	for_each_hstate(h) {
3151641844f5SNaoya Horiguchi 		if (minimum_order > huge_page_order(h))
3152641844f5SNaoya Horiguchi 			minimum_order = huge_page_order(h);
3153641844f5SNaoya Horiguchi 
31548faa8b07SAndi Kleen 		/* oversize hugepages were init'ed in early boot */
3155bae7f4aeSLuiz Capitulino 		if (!hstate_is_gigantic(h))
31568faa8b07SAndi Kleen 			hugetlb_hstate_alloc_pages(h);
315779dfc695SMike Kravetz 
315879dfc695SMike Kravetz 		/*
315979dfc695SMike Kravetz 		 * Set demote order for each hstate.  Note that
316079dfc695SMike Kravetz 		 * h->demote_order is initially 0.
316179dfc695SMike Kravetz 		 * - We can not demote gigantic pages if runtime freeing
316279dfc695SMike Kravetz 		 *   is not supported, so skip this.
3163a01f4390SMike Kravetz 		 * - If CMA allocation is possible, we can not demote
3164a01f4390SMike Kravetz 		 *   HUGETLB_PAGE_ORDER or smaller size pages.
316579dfc695SMike Kravetz 		 */
316679dfc695SMike Kravetz 		if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
316779dfc695SMike Kravetz 			continue;
3168a01f4390SMike Kravetz 		if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
3169a01f4390SMike Kravetz 			continue;
317079dfc695SMike Kravetz 		for_each_hstate(h2) {
317179dfc695SMike Kravetz 			if (h2 == h)
317279dfc695SMike Kravetz 				continue;
317379dfc695SMike Kravetz 			if (h2->order < h->order &&
317479dfc695SMike Kravetz 			    h2->order > h->demote_order)
317579dfc695SMike Kravetz 				h->demote_order = h2->order;
317679dfc695SMike Kravetz 		}
3177e5ff2159SAndi Kleen 	}
3178641844f5SNaoya Horiguchi 	VM_BUG_ON(minimum_order == UINT_MAX);
3179e5ff2159SAndi Kleen }
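/*
 * Demote order selection above, by example (sizes assume x86_64 with
 * 4K base pages): with a 1GB hstate (order 18) and a 2MB hstate
 * (order 9), the 1GB hstate gets demote_order == 9, so demotion
 * produces 2MB pages.  The 2MB hstate keeps demote_order == 0 since
 * no smaller hstate exists to demote to.
 */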
3180e5ff2159SAndi Kleen 
3181e5ff2159SAndi Kleen static void __init report_hugepages(void)
3182e5ff2159SAndi Kleen {
3183e5ff2159SAndi Kleen 	struct hstate *h;
3184e5ff2159SAndi Kleen 
3185e5ff2159SAndi Kleen 	for_each_hstate(h) {
31864abd32dbSAndi Kleen 		char buf[32];
3187c6247f72SMatthew Wilcox 
3188c6247f72SMatthew Wilcox 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3189ffb22af5SAndrew Morton 		pr_info("HugeTLB registered %s page size, pre-allocated %lu pages\n",
3190c6247f72SMatthew Wilcox 			buf, h->free_huge_pages);
3191e5ff2159SAndi Kleen 	}
3192e5ff2159SAndi Kleen }
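/*
 * Example of the resulting boot message (size and count are
 * illustrative only):
 *   HugeTLB registered 2.00 MiB page size, pre-allocated 512 pages
 */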
3193e5ff2159SAndi Kleen 
31941da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM
31956ae11b27SLee Schermerhorn static void try_to_free_low(struct hstate *h, unsigned long count,
31966ae11b27SLee Schermerhorn 						nodemask_t *nodes_allowed)
31971da177e4SLinus Torvalds {
31984415cc8dSChristoph Lameter 	int i;
31991121828aSMike Kravetz 	LIST_HEAD(page_list);
32004415cc8dSChristoph Lameter 
32019487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
3202bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
3203aa888a74SAndi Kleen 		return;
3204aa888a74SAndi Kleen 
32051121828aSMike Kravetz 	/*
32061121828aSMike Kravetz 	 * Collect pages to be freed on a list, and free after dropping lock
32071121828aSMike Kravetz 	 */
32086ae11b27SLee Schermerhorn 	for_each_node_mask(i, *nodes_allowed) {
320910c6ec49SMike Kravetz 		struct page *page, *next;
3210a5516438SAndi Kleen 		struct list_head *freel = &h->hugepage_freelists[i];
3211a5516438SAndi Kleen 		list_for_each_entry_safe(page, next, freel, lru) {
3212a5516438SAndi Kleen 			if (count >= h->nr_huge_pages)
32131121828aSMike Kravetz 				goto out;
32141da177e4SLinus Torvalds 			if (PageHighMem(page))
32151da177e4SLinus Torvalds 				continue;
32166eb4e88aSMike Kravetz 			remove_hugetlb_page(h, page, false);
32171121828aSMike Kravetz 			list_add(&page->lru, &page_list);
32181121828aSMike Kravetz 		}
32191121828aSMike Kravetz 	}
32201121828aSMike Kravetz 
32211121828aSMike Kravetz out:
3222db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
322310c6ec49SMike Kravetz 	update_and_free_pages_bulk(h, &page_list);
3224db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
32251da177e4SLinus Torvalds }
32261da177e4SLinus Torvalds #else
32276ae11b27SLee Schermerhorn static inline void try_to_free_low(struct hstate *h, unsigned long count,
32286ae11b27SLee Schermerhorn 						nodemask_t *nodes_allowed)
32291da177e4SLinus Torvalds {
32301da177e4SLinus Torvalds }
32311da177e4SLinus Torvalds #endif
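/*
 * try_to_free_low() is only built for CONFIG_HIGHMEM kernels: when the
 * pool shrinks it skips highmem pages and releases lowmem huge pages
 * first, since lowmem is typically the scarcer resource there.
 */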
32321da177e4SLinus Torvalds 
323320a0307cSWu Fengguang /*
323420a0307cSWu Fengguang  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
323520a0307cSWu Fengguang  * balanced by operating on them in a round-robin fashion.
323620a0307cSWu Fengguang  * Returns 1 if an adjustment was made.
323720a0307cSWu Fengguang  */
32386ae11b27SLee Schermerhorn static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
32396ae11b27SLee Schermerhorn 				int delta)
324020a0307cSWu Fengguang {
3241b2261026SJoonsoo Kim 	int nr_nodes, node;
324220a0307cSWu Fengguang 
32439487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
324420a0307cSWu Fengguang 	VM_BUG_ON(delta != -1 && delta != 1);
324520a0307cSWu Fengguang 
3246e8c5c824SLee Schermerhorn 	if (delta < 0) {
3247b2261026SJoonsoo Kim 		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
3248b2261026SJoonsoo Kim 			if (h->surplus_huge_pages_node[node])
3249b2261026SJoonsoo Kim 				goto found;
3250b2261026SJoonsoo Kim 		}
3251b2261026SJoonsoo Kim 	} else {
3252b2261026SJoonsoo Kim 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3253b2261026SJoonsoo Kim 			if (h->surplus_huge_pages_node[node] <
3254b2261026SJoonsoo Kim 					h->nr_huge_pages_node[node])
3255b2261026SJoonsoo Kim 				goto found;
3256e8c5c824SLee Schermerhorn 		}
32579a76db09SLee Schermerhorn 	}
3258b2261026SJoonsoo Kim 	return 0;
325920a0307cSWu Fengguang 
3260b2261026SJoonsoo Kim found:
326120a0307cSWu Fengguang 	h->surplus_huge_pages += delta;
3262b2261026SJoonsoo Kim 	h->surplus_huge_pages_node[node] += delta;
3263b2261026SJoonsoo Kim 	return 1;
326420a0307cSWu Fengguang }
326520a0307cSWu Fengguang 
3266a5516438SAndi Kleen #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
3267fd875dcaSMike Kravetz static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
32686ae11b27SLee Schermerhorn 			      nodemask_t *nodes_allowed)
32691da177e4SLinus Torvalds {
32707893d1d5SAdam Litke 	unsigned long min_count, ret;
327110c6ec49SMike Kravetz 	struct page *page;
327210c6ec49SMike Kravetz 	LIST_HEAD(page_list);
3273f60858f9SMike Kravetz 	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3274f60858f9SMike Kravetz 
3275f60858f9SMike Kravetz 	/*
3276f60858f9SMike Kravetz 	 * Bit mask controlling how hard we retry per-node allocations.
3277f60858f9SMike Kravetz 	 * If we can not allocate the bit mask, do not attempt to allocate
3278f60858f9SMike Kravetz 	 * the requested huge pages.
3279f60858f9SMike Kravetz 	 */
3280f60858f9SMike Kravetz 	if (node_alloc_noretry)
3281f60858f9SMike Kravetz 		nodes_clear(*node_alloc_noretry);
3282f60858f9SMike Kravetz 	else
3283f60858f9SMike Kravetz 		return -ENOMEM;
32841da177e4SLinus Torvalds 
328529383967SMike Kravetz 	/*
328629383967SMike Kravetz 	 * resize_lock mutex prevents concurrent adjustments to number of
328729383967SMike Kravetz 	 * pages in hstate via the proc/sysfs interfaces.
328829383967SMike Kravetz 	 */
328929383967SMike Kravetz 	mutex_lock(&h->resize_lock);
3290b65d4adbSMuchun Song 	flush_free_hpage_work(h);
3291db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
32924eb0716eSAlexandre Ghiti 
32934eb0716eSAlexandre Ghiti 	/*
3294fd875dcaSMike Kravetz 	 * Check for a node specific request.
3295fd875dcaSMike Kravetz 	 * Changing node specific huge page count may require a corresponding
3296fd875dcaSMike Kravetz 	 * change to the global count.  In any case, the passed node mask
3297fd875dcaSMike Kravetz 	 * (nodes_allowed) will restrict alloc/free to the specified node.
3298fd875dcaSMike Kravetz 	 */
3299fd875dcaSMike Kravetz 	if (nid != NUMA_NO_NODE) {
3300fd875dcaSMike Kravetz 		unsigned long old_count = count;
3301fd875dcaSMike Kravetz 
3302fd875dcaSMike Kravetz 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
3303fd875dcaSMike Kravetz 		/*
3304fd875dcaSMike Kravetz 		 * User may have specified a large count value which caused the
3305fd875dcaSMike Kravetz 		 * above calculation to overflow.  In this case, they wanted
3306fd875dcaSMike Kravetz 		 * to allocate as many huge pages as possible.  Set count to
3307fd875dcaSMike Kravetz 		 * largest possible value to align with their intention.
3308fd875dcaSMike Kravetz 		 */
3309fd875dcaSMike Kravetz 		if (count < old_count)
3310fd875dcaSMike Kravetz 			count = ULONG_MAX;
3311fd875dcaSMike Kravetz 	}
3312fd875dcaSMike Kravetz 
3313fd875dcaSMike Kravetz 	/*
33144eb0716eSAlexandre Ghiti 	 * Gigantic pages runtime allocation depend on the capability for large
33154eb0716eSAlexandre Ghiti 	 * page range allocation.
33164eb0716eSAlexandre Ghiti 	 * If the system does not provide this feature, return an error when
33174eb0716eSAlexandre Ghiti 	 * the user tries to allocate gigantic pages but let the user free the
33184eb0716eSAlexandre Ghiti 	 * boottime allocated gigantic pages.
33194eb0716eSAlexandre Ghiti 	 */
33204eb0716eSAlexandre Ghiti 	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
33214eb0716eSAlexandre Ghiti 		if (count > persistent_huge_pages(h)) {
3322db71ef79SMike Kravetz 			spin_unlock_irq(&hugetlb_lock);
332329383967SMike Kravetz 			mutex_unlock(&h->resize_lock);
3324f60858f9SMike Kravetz 			NODEMASK_FREE(node_alloc_noretry);
33254eb0716eSAlexandre Ghiti 			return -EINVAL;
33264eb0716eSAlexandre Ghiti 		}
33274eb0716eSAlexandre Ghiti 		/* Fall through to decrease pool */
33284eb0716eSAlexandre Ghiti 	}
3329aa888a74SAndi Kleen 
33307893d1d5SAdam Litke 	/*
33317893d1d5SAdam Litke 	 * Increase the pool size
33327893d1d5SAdam Litke 	 * First take pages out of surplus state.  Then make up the
33337893d1d5SAdam Litke 	 * remaining difference by allocating fresh huge pages.
3334d1c3fb1fSNishanth Aravamudan 	 *
33350c397daeSMichal Hocko 	 * We might race with alloc_surplus_huge_page() here and be unable
3336d1c3fb1fSNishanth Aravamudan 	 * to convert a surplus huge page to a normal huge page. That is
3337d1c3fb1fSNishanth Aravamudan 	 * not critical, though, it just means the overall size of the
3338d1c3fb1fSNishanth Aravamudan 	 * pool might be one hugepage larger than it needs to be, but
3339d1c3fb1fSNishanth Aravamudan 	 * within all the constraints specified by the sysctls.
33407893d1d5SAdam Litke 	 */
3341a5516438SAndi Kleen 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
33426ae11b27SLee Schermerhorn 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
33437893d1d5SAdam Litke 			break;
33447893d1d5SAdam Litke 	}
33457893d1d5SAdam Litke 
3346a5516438SAndi Kleen 	while (count > persistent_huge_pages(h)) {
33477893d1d5SAdam Litke 		/*
33487893d1d5SAdam Litke 		 * If this allocation races such that we no longer need the
33497893d1d5SAdam Litke 		 * page, free_huge_page will handle it by freeing the page
33507893d1d5SAdam Litke 		 * and reducing the surplus.
33517893d1d5SAdam Litke 		 */
3352db71ef79SMike Kravetz 		spin_unlock_irq(&hugetlb_lock);
3353649920c6SJia He 
3354649920c6SJia He 		/* yield cpu to avoid soft lockup */
3355649920c6SJia He 		cond_resched();
3356649920c6SJia He 
3357f60858f9SMike Kravetz 		ret = alloc_pool_huge_page(h, nodes_allowed,
3358f60858f9SMike Kravetz 						node_alloc_noretry);
3359db71ef79SMike Kravetz 		spin_lock_irq(&hugetlb_lock);
33607893d1d5SAdam Litke 		if (!ret)
33617893d1d5SAdam Litke 			goto out;
33627893d1d5SAdam Litke 
3363536240f2SMel Gorman 		/* Bail for signals. Probably ctrl-c from user */
3364536240f2SMel Gorman 		if (signal_pending(current))
3365536240f2SMel Gorman 			goto out;
33667893d1d5SAdam Litke 	}
33677893d1d5SAdam Litke 
33687893d1d5SAdam Litke 	/*
33697893d1d5SAdam Litke 	 * Decrease the pool size
33707893d1d5SAdam Litke 	 * First return free pages to the buddy allocator (being careful
33717893d1d5SAdam Litke 	 * to keep enough around to satisfy reservations).  Then place
33727893d1d5SAdam Litke 	 * pages into surplus state as needed so the pool will shrink
33737893d1d5SAdam Litke 	 * to the desired size as pages become free.
3374d1c3fb1fSNishanth Aravamudan 	 *
3375d1c3fb1fSNishanth Aravamudan 	 * By placing pages into the surplus state independent of the
3376d1c3fb1fSNishanth Aravamudan 	 * overcommit value, we are allowing the surplus pool size to
3377d1c3fb1fSNishanth Aravamudan 	 * exceed overcommit. There are few sane options here. Since
33780c397daeSMichal Hocko 	 * alloc_surplus_huge_page() is checking the global counter,
3379d1c3fb1fSNishanth Aravamudan 	 * though, we'll note that we're not allowed to exceed surplus
3380d1c3fb1fSNishanth Aravamudan 	 * and won't grow the pool anywhere else. Not until one of the
3381d1c3fb1fSNishanth Aravamudan 	 * sysctls are changed, or the surplus pages go out of use.
33827893d1d5SAdam Litke 	 */
3383a5516438SAndi Kleen 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
33846b0c880dSAdam Litke 	min_count = max(count, min_count);
33856ae11b27SLee Schermerhorn 	try_to_free_low(h, min_count, nodes_allowed);
338610c6ec49SMike Kravetz 
338710c6ec49SMike Kravetz 	/*
338810c6ec49SMike Kravetz 	 * Collect pages to be removed on list without dropping lock
338910c6ec49SMike Kravetz 	 */
3390a5516438SAndi Kleen 	while (min_count < persistent_huge_pages(h)) {
339110c6ec49SMike Kravetz 		page = remove_pool_huge_page(h, nodes_allowed, 0);
339210c6ec49SMike Kravetz 		if (!page)
33931da177e4SLinus Torvalds 			break;
339410c6ec49SMike Kravetz 
339510c6ec49SMike Kravetz 		list_add(&page->lru, &page_list);
33961da177e4SLinus Torvalds 	}
339710c6ec49SMike Kravetz 	/* free the pages after dropping lock */
3398db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
339910c6ec49SMike Kravetz 	update_and_free_pages_bulk(h, &page_list);
3400b65d4adbSMuchun Song 	flush_free_hpage_work(h);
3401db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
340210c6ec49SMike Kravetz 
3403a5516438SAndi Kleen 	while (count < persistent_huge_pages(h)) {
34046ae11b27SLee Schermerhorn 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
34057893d1d5SAdam Litke 			break;
34067893d1d5SAdam Litke 	}
34077893d1d5SAdam Litke out:
34084eb0716eSAlexandre Ghiti 	h->max_huge_pages = persistent_huge_pages(h);
3409db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
341029383967SMike Kravetz 	mutex_unlock(&h->resize_lock);
34114eb0716eSAlexandre Ghiti 
3412f60858f9SMike Kravetz 	NODEMASK_FREE(node_alloc_noretry);
3413f60858f9SMike Kravetz 
34144eb0716eSAlexandre Ghiti 	return 0;
34151da177e4SLinus Torvalds }
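/*
 * For illustration, set_max_huge_pages() is the common backend for the
 * pool-resizing interfaces, e.g. (paths and values are examples):
 *   echo 1024 > /proc/sys/vm/nr_hugepages
 *   echo 16 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 * A node-specific write passes nid != NUMA_NO_NODE and the requested
 * count is rebased against that node's current pages above.
 */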
34161da177e4SLinus Torvalds 
34178531fc6fSMike Kravetz static int demote_free_huge_page(struct hstate *h, struct page *page)
34188531fc6fSMike Kravetz {
34198531fc6fSMike Kravetz 	int i, nid = page_to_nid(page);
34208531fc6fSMike Kravetz 	struct hstate *target_hstate;
34218531fc6fSMike Kravetz 	int rc = 0;
34228531fc6fSMike Kravetz 
34238531fc6fSMike Kravetz 	target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
34248531fc6fSMike Kravetz 
34258531fc6fSMike Kravetz 	remove_hugetlb_page_for_demote(h, page, false);
34268531fc6fSMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
34278531fc6fSMike Kravetz 
34285981611dSMuchun Song 	rc = hugetlb_vmemmap_alloc(h, page);
34298531fc6fSMike Kravetz 	if (rc) {
34308531fc6fSMike Kravetz 		/* Allocation of vmemmmap failed, we can not demote page */
34318531fc6fSMike Kravetz 		/* Allocation of vmemmap failed, we cannot demote the page */
34328531fc6fSMike Kravetz 		set_page_refcounted(page);
34338531fc6fSMike Kravetz 		add_hugetlb_page(h, page, false);
34348531fc6fSMike Kravetz 		return rc;
34358531fc6fSMike Kravetz 	}
34368531fc6fSMike Kravetz 
34378531fc6fSMike Kravetz 	/*
34388531fc6fSMike Kravetz 	 * Use destroy_compound_hugetlb_page_for_demote for all huge page
34398531fc6fSMike Kravetz 	 * sizes as it will not ref count pages.
34408531fc6fSMike Kravetz 	 */
34418531fc6fSMike Kravetz 	destroy_compound_hugetlb_page_for_demote(page, huge_page_order(h));
34428531fc6fSMike Kravetz 
34438531fc6fSMike Kravetz 	/*
34448531fc6fSMike Kravetz 	 * Taking target hstate mutex synchronizes with set_max_huge_pages.
34458531fc6fSMike Kravetz 	 * Without the mutex, pages added to target hstate could be marked
34468531fc6fSMike Kravetz 	 * as surplus.
34478531fc6fSMike Kravetz 	 *
34488531fc6fSMike Kravetz 	 * Note that we already hold h->resize_lock.  To prevent deadlock,
34498531fc6fSMike Kravetz 	 * use the convention of always taking larger size hstate mutex first.
34508531fc6fSMike Kravetz 	 */
34518531fc6fSMike Kravetz 	mutex_lock(&target_hstate->resize_lock);
34528531fc6fSMike Kravetz 	for (i = 0; i < pages_per_huge_page(h);
34538531fc6fSMike Kravetz 				i += pages_per_huge_page(target_hstate)) {
34548531fc6fSMike Kravetz 		if (hstate_is_gigantic(target_hstate))
34558531fc6fSMike Kravetz 			prep_compound_gigantic_page_for_demote(page + i,
34568531fc6fSMike Kravetz 							target_hstate->order);
34578531fc6fSMike Kravetz 		else
34588531fc6fSMike Kravetz 			prep_compound_page(page + i, target_hstate->order);
34598531fc6fSMike Kravetz 		set_page_private(page + i, 0);
34608531fc6fSMike Kravetz 		set_page_refcounted(page + i);
34618531fc6fSMike Kravetz 		prep_new_huge_page(target_hstate, page + i, nid);
34628531fc6fSMike Kravetz 		put_page(page + i);
34638531fc6fSMike Kravetz 	}
34648531fc6fSMike Kravetz 	mutex_unlock(&target_hstate->resize_lock);
34658531fc6fSMike Kravetz 
34668531fc6fSMike Kravetz 	spin_lock_irq(&hugetlb_lock);
34678531fc6fSMike Kravetz 
34688531fc6fSMike Kravetz 	/*
34698531fc6fSMike Kravetz 	 * Not absolutely necessary, but for consistency update max_huge_pages
34708531fc6fSMike Kravetz 	 * based on pool changes for the demoted page.
34718531fc6fSMike Kravetz 	 */
34728531fc6fSMike Kravetz 	h->max_huge_pages--;
34738531fc6fSMike Kravetz 	target_hstate->max_huge_pages += pages_per_huge_page(h);
34748531fc6fSMike Kravetz 
34758531fc6fSMike Kravetz 	return rc;
34768531fc6fSMike Kravetz }
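/*
 * Example (x86_64 sizes, for illustration): demoting one free 1GB page
 * with demote_order == 9 produces 512 separate 2MB pages, each prepped
 * and released into the 2MB hstate's free list above.
 */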
34778531fc6fSMike Kravetz 
347879dfc695SMike Kravetz static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
347979dfc695SMike Kravetz 	__must_hold(&hugetlb_lock)
348079dfc695SMike Kravetz {
34818531fc6fSMike Kravetz 	int nr_nodes, node;
34828531fc6fSMike Kravetz 	struct page *page;
348379dfc695SMike Kravetz 
348479dfc695SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
348579dfc695SMike Kravetz 
348679dfc695SMike Kravetz 	/* We should never get here if no demote order */
348779dfc695SMike Kravetz 	if (!h->demote_order) {
348879dfc695SMike Kravetz 		pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
348979dfc695SMike Kravetz 		return -EINVAL;		/* internal error */
349079dfc695SMike Kravetz 	}
349179dfc695SMike Kravetz 
34928531fc6fSMike Kravetz 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
34935a317412SMike Kravetz 		list_for_each_entry(page, &h->hugepage_freelists[node], lru) {
34945a317412SMike Kravetz 			if (PageHWPoison(page))
34955a317412SMike Kravetz 				continue;
34965a317412SMike Kravetz 
34975a317412SMike Kravetz 			return demote_free_huge_page(h, page);
34988531fc6fSMike Kravetz 		}
34998531fc6fSMike Kravetz 	}
35008531fc6fSMike Kravetz 
35015a317412SMike Kravetz 	/*
35025a317412SMike Kravetz 	 * Only way to get here is if all pages on free lists are poisoned.
35035a317412SMike Kravetz 	 * Return -EBUSY so that caller will not retry.
35045a317412SMike Kravetz 	 */
35055a317412SMike Kravetz 	return -EBUSY;
350679dfc695SMike Kravetz }
350779dfc695SMike Kravetz 
3508a3437870SNishanth Aravamudan #define HSTATE_ATTR_RO(_name) \
3509a3437870SNishanth Aravamudan 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3510a3437870SNishanth Aravamudan 
351179dfc695SMike Kravetz #define HSTATE_ATTR_WO(_name) \
351279dfc695SMike Kravetz 	static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
351379dfc695SMike Kravetz 
3514a3437870SNishanth Aravamudan #define HSTATE_ATTR(_name) \
351598bc26acSMiaohe Lin 	static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
3516a3437870SNishanth Aravamudan 
3517a3437870SNishanth Aravamudan static struct kobject *hugepages_kobj;
3518a3437870SNishanth Aravamudan static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
3519a3437870SNishanth Aravamudan 
35209a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
35219a305230SLee Schermerhorn 
35229a305230SLee Schermerhorn static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
3523a3437870SNishanth Aravamudan {
3524a3437870SNishanth Aravamudan 	int i;
35259a305230SLee Schermerhorn 
3526a3437870SNishanth Aravamudan 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
35279a305230SLee Schermerhorn 		if (hstate_kobjs[i] == kobj) {
35289a305230SLee Schermerhorn 			if (nidp)
35299a305230SLee Schermerhorn 				*nidp = NUMA_NO_NODE;
3530a3437870SNishanth Aravamudan 			return &hstates[i];
35319a305230SLee Schermerhorn 		}
35329a305230SLee Schermerhorn 
35339a305230SLee Schermerhorn 	return kobj_to_node_hstate(kobj, nidp);
3534a3437870SNishanth Aravamudan }
3535a3437870SNishanth Aravamudan 
353606808b08SLee Schermerhorn static ssize_t nr_hugepages_show_common(struct kobject *kobj,
3537a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
3538a3437870SNishanth Aravamudan {
35399a305230SLee Schermerhorn 	struct hstate *h;
35409a305230SLee Schermerhorn 	unsigned long nr_huge_pages;
35419a305230SLee Schermerhorn 	int nid;
35429a305230SLee Schermerhorn 
35439a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
35449a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
35459a305230SLee Schermerhorn 		nr_huge_pages = h->nr_huge_pages;
35469a305230SLee Schermerhorn 	else
35479a305230SLee Schermerhorn 		nr_huge_pages = h->nr_huge_pages_node[nid];
35489a305230SLee Schermerhorn 
3549ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", nr_huge_pages);
3550a3437870SNishanth Aravamudan }
3551adbe8726SEric B Munson 
3552238d3c13SDavid Rientjes static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
3553238d3c13SDavid Rientjes 					   struct hstate *h, int nid,
3554238d3c13SDavid Rientjes 					   unsigned long count, size_t len)
3555a3437870SNishanth Aravamudan {
3556a3437870SNishanth Aravamudan 	int err;
35572d0adf7eSOscar Salvador 	nodemask_t nodes_allowed, *n_mask;
3558a3437870SNishanth Aravamudan 
35592d0adf7eSOscar Salvador 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
35602d0adf7eSOscar Salvador 		return -EINVAL;
3561adbe8726SEric B Munson 
35629a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE) {
35639a305230SLee Schermerhorn 		/*
35649a305230SLee Schermerhorn 		 * global hstate attribute
35659a305230SLee Schermerhorn 		 */
35669a305230SLee Schermerhorn 		if (!(obey_mempolicy &&
35672d0adf7eSOscar Salvador 				init_nodemask_of_mempolicy(&nodes_allowed)))
35682d0adf7eSOscar Salvador 			n_mask = &node_states[N_MEMORY];
35692d0adf7eSOscar Salvador 		else
35702d0adf7eSOscar Salvador 			n_mask = &nodes_allowed;
35712d0adf7eSOscar Salvador 	} else {
35729a305230SLee Schermerhorn 		/*
3573fd875dcaSMike Kravetz 		 * Node specific request.  count adjustment happens in
3574fd875dcaSMike Kravetz 		 * set_max_huge_pages() after acquiring hugetlb_lock.
35759a305230SLee Schermerhorn 		 */
35762d0adf7eSOscar Salvador 		init_nodemask_of_node(&nodes_allowed, nid);
35772d0adf7eSOscar Salvador 		n_mask = &nodes_allowed;
3578fd875dcaSMike Kravetz 	}
35799a305230SLee Schermerhorn 
35802d0adf7eSOscar Salvador 	err = set_max_huge_pages(h, count, nid, n_mask);
358106808b08SLee Schermerhorn 
35824eb0716eSAlexandre Ghiti 	return err ? err : len;
358306808b08SLee Schermerhorn }
358406808b08SLee Schermerhorn 
3585238d3c13SDavid Rientjes static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
3586238d3c13SDavid Rientjes 					 struct kobject *kobj, const char *buf,
3587238d3c13SDavid Rientjes 					 size_t len)
3588238d3c13SDavid Rientjes {
3589238d3c13SDavid Rientjes 	struct hstate *h;
3590238d3c13SDavid Rientjes 	unsigned long count;
3591238d3c13SDavid Rientjes 	int nid;
3592238d3c13SDavid Rientjes 	int err;
3593238d3c13SDavid Rientjes 
3594238d3c13SDavid Rientjes 	err = kstrtoul(buf, 10, &count);
3595238d3c13SDavid Rientjes 	if (err)
3596238d3c13SDavid Rientjes 		return err;
3597238d3c13SDavid Rientjes 
3598238d3c13SDavid Rientjes 	h = kobj_to_hstate(kobj, &nid);
3599238d3c13SDavid Rientjes 	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
3600238d3c13SDavid Rientjes }
3601238d3c13SDavid Rientjes 
360206808b08SLee Schermerhorn static ssize_t nr_hugepages_show(struct kobject *kobj,
360306808b08SLee Schermerhorn 				       struct kobj_attribute *attr, char *buf)
360406808b08SLee Schermerhorn {
360506808b08SLee Schermerhorn 	return nr_hugepages_show_common(kobj, attr, buf);
360606808b08SLee Schermerhorn }
360706808b08SLee Schermerhorn 
360806808b08SLee Schermerhorn static ssize_t nr_hugepages_store(struct kobject *kobj,
360906808b08SLee Schermerhorn 	       struct kobj_attribute *attr, const char *buf, size_t len)
361006808b08SLee Schermerhorn {
3611238d3c13SDavid Rientjes 	return nr_hugepages_store_common(false, kobj, buf, len);
3612a3437870SNishanth Aravamudan }
3613a3437870SNishanth Aravamudan HSTATE_ATTR(nr_hugepages);
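/*
 * nr_hugepages is exposed both globally and per node once the sysfs
 * registration below runs, e.g. (2MB size shown as an example):
 *   /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 */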
3614a3437870SNishanth Aravamudan 
361506808b08SLee Schermerhorn #ifdef CONFIG_NUMA
361606808b08SLee Schermerhorn 
361706808b08SLee Schermerhorn /*
361806808b08SLee Schermerhorn  * hstate attribute for optionally mempolicy-based constraint on persistent
361906808b08SLee Schermerhorn  * huge page alloc/free.
362006808b08SLee Schermerhorn  */
362106808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
3622ae7a927dSJoe Perches 					   struct kobj_attribute *attr,
3623ae7a927dSJoe Perches 					   char *buf)
362406808b08SLee Schermerhorn {
362506808b08SLee Schermerhorn 	return nr_hugepages_show_common(kobj, attr, buf);
362606808b08SLee Schermerhorn }
362706808b08SLee Schermerhorn 
362806808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
362906808b08SLee Schermerhorn 	       struct kobj_attribute *attr, const char *buf, size_t len)
363006808b08SLee Schermerhorn {
3631238d3c13SDavid Rientjes 	return nr_hugepages_store_common(true, kobj, buf, len);
363206808b08SLee Schermerhorn }
363306808b08SLee Schermerhorn HSTATE_ATTR(nr_hugepages_mempolicy);
363406808b08SLee Schermerhorn #endif
363506808b08SLee Schermerhorn 
363606808b08SLee Schermerhorn 
3637a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
3638a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
3639a3437870SNishanth Aravamudan {
36409a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3641ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
3642a3437870SNishanth Aravamudan }
3643adbe8726SEric B Munson 
3644a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
3645a3437870SNishanth Aravamudan 		struct kobj_attribute *attr, const char *buf, size_t count)
3646a3437870SNishanth Aravamudan {
3647a3437870SNishanth Aravamudan 	int err;
3648a3437870SNishanth Aravamudan 	unsigned long input;
36499a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3650a3437870SNishanth Aravamudan 
3651bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
3652adbe8726SEric B Munson 		return -EINVAL;
3653adbe8726SEric B Munson 
36543dbb95f7SJingoo Han 	err = kstrtoul(buf, 10, &input);
3655a3437870SNishanth Aravamudan 	if (err)
365673ae31e5SEric B Munson 		return err;
3657a3437870SNishanth Aravamudan 
3658db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
3659a3437870SNishanth Aravamudan 	h->nr_overcommit_huge_pages = input;
3660db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
3661a3437870SNishanth Aravamudan 
3662a3437870SNishanth Aravamudan 	return count;
3663a3437870SNishanth Aravamudan }
3664a3437870SNishanth Aravamudan HSTATE_ATTR(nr_overcommit_hugepages);
3665a3437870SNishanth Aravamudan 
3666a3437870SNishanth Aravamudan static ssize_t free_hugepages_show(struct kobject *kobj,
3667a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
3668a3437870SNishanth Aravamudan {
36699a305230SLee Schermerhorn 	struct hstate *h;
36709a305230SLee Schermerhorn 	unsigned long free_huge_pages;
36719a305230SLee Schermerhorn 	int nid;
36729a305230SLee Schermerhorn 
36739a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
36749a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
36759a305230SLee Schermerhorn 		free_huge_pages = h->free_huge_pages;
36769a305230SLee Schermerhorn 	else
36779a305230SLee Schermerhorn 		free_huge_pages = h->free_huge_pages_node[nid];
36789a305230SLee Schermerhorn 
3679ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", free_huge_pages);
3680a3437870SNishanth Aravamudan }
3681a3437870SNishanth Aravamudan HSTATE_ATTR_RO(free_hugepages);
3682a3437870SNishanth Aravamudan 
3683a3437870SNishanth Aravamudan static ssize_t resv_hugepages_show(struct kobject *kobj,
3684a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
3685a3437870SNishanth Aravamudan {
36869a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3687ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
3688a3437870SNishanth Aravamudan }
3689a3437870SNishanth Aravamudan HSTATE_ATTR_RO(resv_hugepages);
3690a3437870SNishanth Aravamudan 
3691a3437870SNishanth Aravamudan static ssize_t surplus_hugepages_show(struct kobject *kobj,
3692a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
3693a3437870SNishanth Aravamudan {
36949a305230SLee Schermerhorn 	struct hstate *h;
36959a305230SLee Schermerhorn 	unsigned long surplus_huge_pages;
36969a305230SLee Schermerhorn 	int nid;
36979a305230SLee Schermerhorn 
36989a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
36999a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
37009a305230SLee Schermerhorn 		surplus_huge_pages = h->surplus_huge_pages;
37019a305230SLee Schermerhorn 	else
37029a305230SLee Schermerhorn 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
37039a305230SLee Schermerhorn 
3704ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
3705a3437870SNishanth Aravamudan }
3706a3437870SNishanth Aravamudan HSTATE_ATTR_RO(surplus_hugepages);
3707a3437870SNishanth Aravamudan 
370879dfc695SMike Kravetz static ssize_t demote_store(struct kobject *kobj,
370979dfc695SMike Kravetz 	       struct kobj_attribute *attr, const char *buf, size_t len)
371079dfc695SMike Kravetz {
371179dfc695SMike Kravetz 	unsigned long nr_demote;
371279dfc695SMike Kravetz 	unsigned long nr_available;
371379dfc695SMike Kravetz 	nodemask_t nodes_allowed, *n_mask;
371479dfc695SMike Kravetz 	struct hstate *h;
371579dfc695SMike Kravetz 	int err = 0;
371679dfc695SMike Kravetz 	int nid;
371779dfc695SMike Kravetz 
371879dfc695SMike Kravetz 	err = kstrtoul(buf, 10, &nr_demote);
371979dfc695SMike Kravetz 	if (err)
372079dfc695SMike Kravetz 		return err;
372179dfc695SMike Kravetz 	h = kobj_to_hstate(kobj, &nid);
372279dfc695SMike Kravetz 
372379dfc695SMike Kravetz 	if (nid != NUMA_NO_NODE) {
372479dfc695SMike Kravetz 		init_nodemask_of_node(&nodes_allowed, nid);
372579dfc695SMike Kravetz 		n_mask = &nodes_allowed;
372679dfc695SMike Kravetz 	} else {
372779dfc695SMike Kravetz 		n_mask = &node_states[N_MEMORY];
372879dfc695SMike Kravetz 	}
372979dfc695SMike Kravetz 
373079dfc695SMike Kravetz 	/* Synchronize with other sysfs operations modifying huge pages */
373179dfc695SMike Kravetz 	mutex_lock(&h->resize_lock);
373279dfc695SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
373379dfc695SMike Kravetz 
373479dfc695SMike Kravetz 	while (nr_demote) {
373579dfc695SMike Kravetz 		/*
373679dfc695SMike Kravetz 		 * Check for available pages to demote each time thorough the
373779dfc695SMike Kravetz 		 * Check for available pages to demote each time through the
373879dfc695SMike Kravetz 		 */
373979dfc695SMike Kravetz 		if (nid != NUMA_NO_NODE)
374079dfc695SMike Kravetz 			nr_available = h->free_huge_pages_node[nid];
374179dfc695SMike Kravetz 		else
374279dfc695SMike Kravetz 			nr_available = h->free_huge_pages;
374379dfc695SMike Kravetz 		nr_available -= h->resv_huge_pages;
374479dfc695SMike Kravetz 		if (!nr_available)
374579dfc695SMike Kravetz 			break;
374679dfc695SMike Kravetz 
374779dfc695SMike Kravetz 		err = demote_pool_huge_page(h, n_mask);
374879dfc695SMike Kravetz 		if (err)
374979dfc695SMike Kravetz 			break;
375079dfc695SMike Kravetz 
375179dfc695SMike Kravetz 		nr_demote--;
375279dfc695SMike Kravetz 	}
375379dfc695SMike Kravetz 
375479dfc695SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
375579dfc695SMike Kravetz 	mutex_unlock(&h->resize_lock);
375679dfc695SMike Kravetz 
375779dfc695SMike Kravetz 	if (err)
375879dfc695SMike Kravetz 		return err;
375979dfc695SMike Kravetz 	return len;
376079dfc695SMike Kravetz }
376179dfc695SMike Kravetz HSTATE_ATTR_WO(demote);
376279dfc695SMike Kravetz 
376379dfc695SMike Kravetz static ssize_t demote_size_show(struct kobject *kobj,
376479dfc695SMike Kravetz 					struct kobj_attribute *attr, char *buf)
376579dfc695SMike Kravetz {
376679dfc695SMike Kravetz 	int nid;
376779dfc695SMike Kravetz 	struct hstate *h = kobj_to_hstate(kobj, &nid);
376879dfc695SMike Kravetz 	unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
376979dfc695SMike Kravetz 
377079dfc695SMike Kravetz 	return sysfs_emit(buf, "%lukB\n", demote_size);
377179dfc695SMike Kravetz }
377279dfc695SMike Kravetz 
377379dfc695SMike Kravetz static ssize_t demote_size_store(struct kobject *kobj,
377479dfc695SMike Kravetz 					struct kobj_attribute *attr,
377579dfc695SMike Kravetz 					const char *buf, size_t count)
377679dfc695SMike Kravetz {
377779dfc695SMike Kravetz 	struct hstate *h, *demote_hstate;
377879dfc695SMike Kravetz 	unsigned long demote_size;
377979dfc695SMike Kravetz 	unsigned int demote_order;
378079dfc695SMike Kravetz 	int nid;
378179dfc695SMike Kravetz 
378279dfc695SMike Kravetz 	demote_size = (unsigned long)memparse(buf, NULL);
378379dfc695SMike Kravetz 
378479dfc695SMike Kravetz 	demote_hstate = size_to_hstate(demote_size);
378579dfc695SMike Kravetz 	if (!demote_hstate)
378679dfc695SMike Kravetz 		return -EINVAL;
378779dfc695SMike Kravetz 	demote_order = demote_hstate->order;
3788a01f4390SMike Kravetz 	if (demote_order < HUGETLB_PAGE_ORDER)
3789a01f4390SMike Kravetz 		return -EINVAL;
379079dfc695SMike Kravetz 
379179dfc695SMike Kravetz 	/* demote order must be smaller than hstate order */
379279dfc695SMike Kravetz 	h = kobj_to_hstate(kobj, &nid);
379379dfc695SMike Kravetz 	if (demote_order >= h->order)
379479dfc695SMike Kravetz 		return -EINVAL;
379579dfc695SMike Kravetz 
379679dfc695SMike Kravetz 	/* resize_lock synchronizes access to demote size and writes */
379779dfc695SMike Kravetz 	mutex_lock(&h->resize_lock);
379879dfc695SMike Kravetz 	h->demote_order = demote_order;
379979dfc695SMike Kravetz 	mutex_unlock(&h->resize_lock);
380079dfc695SMike Kravetz 
380179dfc695SMike Kravetz 	return count;
380279dfc695SMike Kravetz }
380379dfc695SMike Kravetz HSTATE_ATTR(demote_size);
380479dfc695SMike Kravetz 
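/*
 * Illustrative demote sequence (sizes and counts are examples only):
 *   echo 2M > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote_size
 *   echo 4 > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote
 * splits up to four free 1GB pages into 2MB pages, subject to the
 * availability checks in demote_store() above.
 */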
3805a3437870SNishanth Aravamudan static struct attribute *hstate_attrs[] = {
3806a3437870SNishanth Aravamudan 	&nr_hugepages_attr.attr,
3807a3437870SNishanth Aravamudan 	&nr_overcommit_hugepages_attr.attr,
3808a3437870SNishanth Aravamudan 	&free_hugepages_attr.attr,
3809a3437870SNishanth Aravamudan 	&resv_hugepages_attr.attr,
3810a3437870SNishanth Aravamudan 	&surplus_hugepages_attr.attr,
381106808b08SLee Schermerhorn #ifdef CONFIG_NUMA
381206808b08SLee Schermerhorn 	&nr_hugepages_mempolicy_attr.attr,
381306808b08SLee Schermerhorn #endif
3814a3437870SNishanth Aravamudan 	NULL,
3815a3437870SNishanth Aravamudan };
3816a3437870SNishanth Aravamudan 
381767e5ed96SArvind Yadav static const struct attribute_group hstate_attr_group = {
3818a3437870SNishanth Aravamudan 	.attrs = hstate_attrs,
3819a3437870SNishanth Aravamudan };
3820a3437870SNishanth Aravamudan 
382179dfc695SMike Kravetz static struct attribute *hstate_demote_attrs[] = {
382279dfc695SMike Kravetz 	&demote_size_attr.attr,
382379dfc695SMike Kravetz 	&demote_attr.attr,
382479dfc695SMike Kravetz 	NULL,
382579dfc695SMike Kravetz };
382679dfc695SMike Kravetz 
382779dfc695SMike Kravetz static const struct attribute_group hstate_demote_attr_group = {
382879dfc695SMike Kravetz 	.attrs = hstate_demote_attrs,
382979dfc695SMike Kravetz };
383079dfc695SMike Kravetz 
3831094e9539SJeff Mahoney static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
38329a305230SLee Schermerhorn 				    struct kobject **hstate_kobjs,
383367e5ed96SArvind Yadav 				    const struct attribute_group *hstate_attr_group)
3834a3437870SNishanth Aravamudan {
3835a3437870SNishanth Aravamudan 	int retval;
3836972dc4deSAneesh Kumar K.V 	int hi = hstate_index(h);
3837a3437870SNishanth Aravamudan 
38389a305230SLee Schermerhorn 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
38399a305230SLee Schermerhorn 	if (!hstate_kobjs[hi])
3840a3437870SNishanth Aravamudan 		return -ENOMEM;
3841a3437870SNishanth Aravamudan 
38429a305230SLee Schermerhorn 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
3843cc2205a6SMiaohe Lin 	if (retval) {
38449a305230SLee Schermerhorn 		kobject_put(hstate_kobjs[hi]);
3845cc2205a6SMiaohe Lin 		hstate_kobjs[hi] = NULL;
3846cc2205a6SMiaohe Lin 	}
3847a3437870SNishanth Aravamudan 
384879dfc695SMike Kravetz 	if (h->demote_order) {
384979dfc695SMike Kravetz 		if (sysfs_create_group(hstate_kobjs[hi],
385079dfc695SMike Kravetz 					&hstate_demote_attr_group))
385179dfc695SMike Kravetz 			pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
385279dfc695SMike Kravetz 	}
385379dfc695SMike Kravetz 
3854a3437870SNishanth Aravamudan 	return retval;
3855a3437870SNishanth Aravamudan }
3856a3437870SNishanth Aravamudan 
3857a3437870SNishanth Aravamudan static void __init hugetlb_sysfs_init(void)
3858a3437870SNishanth Aravamudan {
3859a3437870SNishanth Aravamudan 	struct hstate *h;
3860a3437870SNishanth Aravamudan 	int err;
3861a3437870SNishanth Aravamudan 
3862a3437870SNishanth Aravamudan 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
3863a3437870SNishanth Aravamudan 	if (!hugepages_kobj)
3864a3437870SNishanth Aravamudan 		return;
3865a3437870SNishanth Aravamudan 
3866a3437870SNishanth Aravamudan 	for_each_hstate(h) {
38679a305230SLee Schermerhorn 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
38689a305230SLee Schermerhorn 					 hstate_kobjs, &hstate_attr_group);
3869a3437870SNishanth Aravamudan 		if (err)
3870282f4214SMike Kravetz 			pr_err("HugeTLB: Unable to add hstate %s", h->name);
3871a3437870SNishanth Aravamudan 	}
3872a3437870SNishanth Aravamudan }
3873a3437870SNishanth Aravamudan 
38749a305230SLee Schermerhorn #ifdef CONFIG_NUMA
38759a305230SLee Schermerhorn 
38769a305230SLee Schermerhorn /*
38779a305230SLee Schermerhorn  * node_hstate/s - associate per node hstate attributes, via their kobjects,
387810fbcf4cSKay Sievers  * with node devices in node_devices[] using a parallel array.  The array
387910fbcf4cSKay Sievers  * index of a node device or _hstate == node id.
388010fbcf4cSKay Sievers  * This is here to avoid any static dependency of the node device driver, in
38819a305230SLee Schermerhorn  * the base kernel, on the hugetlb module.
38829a305230SLee Schermerhorn  */
38839a305230SLee Schermerhorn struct node_hstate {
38849a305230SLee Schermerhorn 	struct kobject		*hugepages_kobj;
38859a305230SLee Schermerhorn 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
38869a305230SLee Schermerhorn };
3887b4e289a6SAlexander Kuleshov static struct node_hstate node_hstates[MAX_NUMNODES];
38889a305230SLee Schermerhorn 
38899a305230SLee Schermerhorn /*
389010fbcf4cSKay Sievers  * A subset of global hstate attributes for node devices
38919a305230SLee Schermerhorn  */
38929a305230SLee Schermerhorn static struct attribute *per_node_hstate_attrs[] = {
38939a305230SLee Schermerhorn 	&nr_hugepages_attr.attr,
38949a305230SLee Schermerhorn 	&free_hugepages_attr.attr,
38959a305230SLee Schermerhorn 	&surplus_hugepages_attr.attr,
38969a305230SLee Schermerhorn 	NULL,
38979a305230SLee Schermerhorn };
38989a305230SLee Schermerhorn 
389967e5ed96SArvind Yadav static const struct attribute_group per_node_hstate_attr_group = {
39009a305230SLee Schermerhorn 	.attrs = per_node_hstate_attrs,
39019a305230SLee Schermerhorn };
39029a305230SLee Schermerhorn 
39039a305230SLee Schermerhorn /*
390410fbcf4cSKay Sievers  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
39059a305230SLee Schermerhorn  * Returns node id via non-NULL nidp.
39069a305230SLee Schermerhorn  */
39079a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
39089a305230SLee Schermerhorn {
39099a305230SLee Schermerhorn 	int nid;
39109a305230SLee Schermerhorn 
39119a305230SLee Schermerhorn 	for (nid = 0; nid < nr_node_ids; nid++) {
39129a305230SLee Schermerhorn 		struct node_hstate *nhs = &node_hstates[nid];
39139a305230SLee Schermerhorn 		int i;
39149a305230SLee Schermerhorn 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
39159a305230SLee Schermerhorn 			if (nhs->hstate_kobjs[i] == kobj) {
39169a305230SLee Schermerhorn 				if (nidp)
39179a305230SLee Schermerhorn 					*nidp = nid;
39189a305230SLee Schermerhorn 				return &hstates[i];
39199a305230SLee Schermerhorn 			}
39209a305230SLee Schermerhorn 	}
39219a305230SLee Schermerhorn 
39229a305230SLee Schermerhorn 	BUG();
39239a305230SLee Schermerhorn 	return NULL;
39249a305230SLee Schermerhorn }
39259a305230SLee Schermerhorn 
39269a305230SLee Schermerhorn /*
392710fbcf4cSKay Sievers  * Unregister hstate attributes from a single node device.
39289a305230SLee Schermerhorn  * No-op if no hstate attributes attached.
39299a305230SLee Schermerhorn  */
39303cd8b44fSClaudiu Ghioc static void hugetlb_unregister_node(struct node *node)
39319a305230SLee Schermerhorn {
39329a305230SLee Schermerhorn 	struct hstate *h;
393310fbcf4cSKay Sievers 	struct node_hstate *nhs = &node_hstates[node->dev.id];
39349a305230SLee Schermerhorn 
39359a305230SLee Schermerhorn 	if (!nhs->hugepages_kobj)
39369b5e5d0fSLee Schermerhorn 		return;		/* no hstate attributes */
39379a305230SLee Schermerhorn 
3938972dc4deSAneesh Kumar K.V 	for_each_hstate(h) {
3939972dc4deSAneesh Kumar K.V 		int idx = hstate_index(h);
3940972dc4deSAneesh Kumar K.V 		if (nhs->hstate_kobjs[idx]) {
3941972dc4deSAneesh Kumar K.V 			kobject_put(nhs->hstate_kobjs[idx]);
3942972dc4deSAneesh Kumar K.V 			nhs->hstate_kobjs[idx] = NULL;
3943972dc4deSAneesh Kumar K.V 		}
39449a305230SLee Schermerhorn 	}
39459a305230SLee Schermerhorn 
39469a305230SLee Schermerhorn 	kobject_put(nhs->hugepages_kobj);
39479a305230SLee Schermerhorn 	nhs->hugepages_kobj = NULL;
39489a305230SLee Schermerhorn }
39499a305230SLee Schermerhorn 
39509a305230SLee Schermerhorn 
39519a305230SLee Schermerhorn /*
395210fbcf4cSKay Sievers  * Register hstate attributes for a single node device.
39539a305230SLee Schermerhorn  * No-op if attributes already registered.
39549a305230SLee Schermerhorn  */
39553cd8b44fSClaudiu Ghioc static void hugetlb_register_node(struct node *node)
39569a305230SLee Schermerhorn {
39579a305230SLee Schermerhorn 	struct hstate *h;
395810fbcf4cSKay Sievers 	struct node_hstate *nhs = &node_hstates[node->dev.id];
39599a305230SLee Schermerhorn 	int err;
39609a305230SLee Schermerhorn 
39619a305230SLee Schermerhorn 	if (nhs->hugepages_kobj)
39629a305230SLee Schermerhorn 		return;		/* already allocated */
39639a305230SLee Schermerhorn 
39649a305230SLee Schermerhorn 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
396510fbcf4cSKay Sievers 							&node->dev.kobj);
39669a305230SLee Schermerhorn 	if (!nhs->hugepages_kobj)
39679a305230SLee Schermerhorn 		return;
39689a305230SLee Schermerhorn 
39699a305230SLee Schermerhorn 	for_each_hstate(h) {
39709a305230SLee Schermerhorn 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
39719a305230SLee Schermerhorn 						nhs->hstate_kobjs,
39729a305230SLee Schermerhorn 						&per_node_hstate_attr_group);
39739a305230SLee Schermerhorn 		if (err) {
3974282f4214SMike Kravetz 			pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
397510fbcf4cSKay Sievers 				h->name, node->dev.id);
39769a305230SLee Schermerhorn 			hugetlb_unregister_node(node);
39779a305230SLee Schermerhorn 			break;
39789a305230SLee Schermerhorn 		}
39799a305230SLee Schermerhorn 	}
39809a305230SLee Schermerhorn }
39819a305230SLee Schermerhorn 
39829a305230SLee Schermerhorn /*
39839b5e5d0fSLee Schermerhorn  * hugetlb init time:  register hstate attributes for all registered node
398410fbcf4cSKay Sievers  * devices of nodes that have memory.  All on-line nodes should have
398510fbcf4cSKay Sievers  * registered their associated device by this time.
39869a305230SLee Schermerhorn  */
39877d9ca000SLuiz Capitulino static void __init hugetlb_register_all_nodes(void)
39889a305230SLee Schermerhorn {
39899a305230SLee Schermerhorn 	int nid;
39909a305230SLee Schermerhorn 
39918cebfcd0SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
39928732794bSWen Congyang 		struct node *node = node_devices[nid];
399310fbcf4cSKay Sievers 		if (node->dev.id == nid)
39949a305230SLee Schermerhorn 			hugetlb_register_node(node);
39959a305230SLee Schermerhorn 	}
39969a305230SLee Schermerhorn 
39979a305230SLee Schermerhorn 	/*
399810fbcf4cSKay Sievers 	 * Let the node device driver know we're here so it can
39999a305230SLee Schermerhorn 	 * [un]register hstate attributes on node hotplug.
40009a305230SLee Schermerhorn 	 */
40019a305230SLee Schermerhorn 	register_hugetlbfs_with_node(hugetlb_register_node,
40029a305230SLee Schermerhorn 				     hugetlb_unregister_node);
40039a305230SLee Schermerhorn }
40049a305230SLee Schermerhorn #else	/* !CONFIG_NUMA */
40059a305230SLee Schermerhorn 
40069a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
40079a305230SLee Schermerhorn {
40089a305230SLee Schermerhorn 	BUG();
40099a305230SLee Schermerhorn 	if (nidp)
40109a305230SLee Schermerhorn 		*nidp = -1;
40119a305230SLee Schermerhorn 	return NULL;
40129a305230SLee Schermerhorn }
40139a305230SLee Schermerhorn 
40149a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) { }
40159a305230SLee Schermerhorn 
40169a305230SLee Schermerhorn #endif
40179a305230SLee Schermerhorn 
4018a3437870SNishanth Aravamudan static int __init hugetlb_init(void)
4019a3437870SNishanth Aravamudan {
40208382d914SDavidlohr Bueso 	int i;
40218382d914SDavidlohr Bueso 
4022d6995da3SMike Kravetz 	BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4023d6995da3SMike Kravetz 			__NR_HPAGEFLAGS);
4024d6995da3SMike Kravetz 
4025c2833a5bSMike Kravetz 	if (!hugepages_supported()) {
4026c2833a5bSMike Kravetz 		if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4027c2833a5bSMike Kravetz 			pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
40280ef89d25SBenjamin Herrenschmidt 		return 0;
4029d715cf80SLiam R. Howlett 	}
4030d715cf80SLiam R. Howlett 
4031282f4214SMike Kravetz 	/*
4032282f4214SMike Kravetz 	 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
4033282f4214SMike Kravetz 	 * architectures depend on setup being done here.
4034282f4214SMike Kravetz 	 */
4035a3437870SNishanth Aravamudan 	hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4036282f4214SMike Kravetz 	if (!parsed_default_hugepagesz) {
4037282f4214SMike Kravetz 		/*
4038282f4214SMike Kravetz 		 * If we did not parse a default huge page size, set
4039282f4214SMike Kravetz 		 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4040282f4214SMike Kravetz 		 * number of huge pages for this default size was implicitly
4041282f4214SMike Kravetz 		 * specified, set that here as well.
4042282f4214SMike Kravetz 		 * Note that the implicit setting will overwrite an explicit
4043282f4214SMike Kravetz 		 * setting.  A warning will be printed in this case.
4044282f4214SMike Kravetz 		 */
4045282f4214SMike Kravetz 		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4046f8b74815SVaishali Thakkar 		if (default_hstate_max_huge_pages) {
4047282f4214SMike Kravetz 			if (default_hstate.max_huge_pages) {
4048282f4214SMike Kravetz 				char buf[32];
4049282f4214SMike Kravetz 
4050282f4214SMike Kravetz 				string_get_size(huge_page_size(&default_hstate),
4051282f4214SMike Kravetz 					1, STRING_UNITS_2, buf, 32);
4052282f4214SMike Kravetz 				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4053282f4214SMike Kravetz 					default_hstate.max_huge_pages, buf);
4054282f4214SMike Kravetz 				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4055282f4214SMike Kravetz 					default_hstate_max_huge_pages);
4056282f4214SMike Kravetz 			}
4057282f4214SMike Kravetz 			default_hstate.max_huge_pages =
4058282f4214SMike Kravetz 				default_hstate_max_huge_pages;
4059b5389086SZhenguo Yao 
40600a7a0f6fSPeng Liu 			for_each_online_node(i)
4061b5389086SZhenguo Yao 				default_hstate.max_huge_pages_node[i] =
4062b5389086SZhenguo Yao 					default_hugepages_in_node[i];
4063282f4214SMike Kravetz 		}
4064f8b74815SVaishali Thakkar 	}
4065a3437870SNishanth Aravamudan 
4066cf11e85fSRoman Gushchin 	hugetlb_cma_check();
4067a3437870SNishanth Aravamudan 	hugetlb_init_hstates();
4068aa888a74SAndi Kleen 	gather_bootmem_prealloc();
4069a3437870SNishanth Aravamudan 	report_hugepages();
4070a3437870SNishanth Aravamudan 
4071a3437870SNishanth Aravamudan 	hugetlb_sysfs_init();
40729a305230SLee Schermerhorn 	hugetlb_register_all_nodes();
40737179e7bfSJianguo Wu 	hugetlb_cgroup_file_init();
40749a305230SLee Schermerhorn 
40758382d914SDavidlohr Bueso #ifdef CONFIG_SMP
40768382d914SDavidlohr Bueso 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
40778382d914SDavidlohr Bueso #else
40788382d914SDavidlohr Bueso 	num_fault_mutexes = 1;
40798382d914SDavidlohr Bueso #endif
4080c672c7f2SMike Kravetz 	hugetlb_fault_mutex_table =
40816da2ec56SKees Cook 		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
40826da2ec56SKees Cook 			      GFP_KERNEL);
4083c672c7f2SMike Kravetz 	BUG_ON(!hugetlb_fault_mutex_table);
40848382d914SDavidlohr Bueso 
40858382d914SDavidlohr Bueso 	for (i = 0; i < num_fault_mutexes; i++)
4086c672c7f2SMike Kravetz 		mutex_init(&hugetlb_fault_mutex_table[i]);
4087a3437870SNishanth Aravamudan 	return 0;
4088a3437870SNishanth Aravamudan }
40893e89e1c5SPaul Gortmaker subsys_initcall(hugetlb_init);
4090a3437870SNishanth Aravamudan 
4091ae94da89SMike Kravetz /* Overwritten by architectures with more huge page sizes */
4092ae94da89SMike Kravetz bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
40939fee021dSVaishali Thakkar {
4094ae94da89SMike Kravetz 	return size == HPAGE_SIZE;
40959fee021dSVaishali Thakkar }
40969fee021dSVaishali Thakkar 
4097d00181b9SKirill A. Shutemov void __init hugetlb_add_hstate(unsigned int order)
4098a3437870SNishanth Aravamudan {
4099a3437870SNishanth Aravamudan 	struct hstate *h;
41008faa8b07SAndi Kleen 	unsigned long i;
41018faa8b07SAndi Kleen 
4102a3437870SNishanth Aravamudan 	if (size_to_hstate(PAGE_SIZE << order)) {
4103a3437870SNishanth Aravamudan 		return;
4104a3437870SNishanth Aravamudan 	}
410547d38344SAneesh Kumar K.V 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4106a3437870SNishanth Aravamudan 	BUG_ON(order == 0);
410747d38344SAneesh Kumar K.V 	h = &hstates[hugetlb_max_hstate++];
410829383967SMike Kravetz 	mutex_init(&h->resize_lock);
4109a3437870SNishanth Aravamudan 	h->order = order;
4110aca78307SMiaohe Lin 	h->mask = ~(huge_page_size(h) - 1);
41118faa8b07SAndi Kleen 	for (i = 0; i < MAX_NUMNODES; ++i)
41128faa8b07SAndi Kleen 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
41130edaecfaSAneesh Kumar K.V 	INIT_LIST_HEAD(&h->hugepage_activelist);
411454f18d35SAndrew Morton 	h->next_nid_to_alloc = first_memory_node;
411554f18d35SAndrew Morton 	h->next_nid_to_free = first_memory_node;
4116a3437870SNishanth Aravamudan 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4117a3437870SNishanth Aravamudan 					huge_page_size(h)/1024);
411877490587SMuchun Song 	hugetlb_vmemmap_init(h);
41198faa8b07SAndi Kleen 
4120a3437870SNishanth Aravamudan 	parsed_hstate = h;
4121a3437870SNishanth Aravamudan }
4122a3437870SNishanth Aravamudan 
4123b5389086SZhenguo Yao bool __init __weak hugetlb_node_alloc_supported(void)
4124b5389086SZhenguo Yao {
4125b5389086SZhenguo Yao 	return true;
4126b5389086SZhenguo Yao }
4127f87442f4SPeng Liu 
4128f87442f4SPeng Liu static void __init hugepages_clear_pages_in_node(void)
4129f87442f4SPeng Liu {
4130f87442f4SPeng Liu 	if (!hugetlb_max_hstate) {
4131f87442f4SPeng Liu 		default_hstate_max_huge_pages = 0;
4132f87442f4SPeng Liu 		memset(default_hugepages_in_node, 0,
4133f87442f4SPeng Liu 			MAX_NUMNODES * sizeof(unsigned int));
4134f87442f4SPeng Liu 	} else {
4135f87442f4SPeng Liu 		parsed_hstate->max_huge_pages = 0;
4136f87442f4SPeng Liu 		memset(parsed_hstate->max_huge_pages_node, 0,
4137f87442f4SPeng Liu 			MAX_NUMNODES * sizeof(unsigned int));
4138f87442f4SPeng Liu 	}
4139f87442f4SPeng Liu }
4140f87442f4SPeng Liu 
4141282f4214SMike Kravetz /*
4142282f4214SMike Kravetz  * hugepages command line processing
4143282f4214SMike Kravetz  * hugepages normally follows a valid hugepagesz or default_hugepagesz
4144282f4214SMike Kravetz  * specification.  If not, ignore the hugepages value.  hugepages can also
4145282f4214SMike Kravetz  * be the first huge page command line  option in which case it implicitly
4146282f4214SMike Kravetz  * be the first huge page command line option, in which case it implicitly
4147282f4214SMike Kravetz  */
4148282f4214SMike Kravetz static int __init hugepages_setup(char *s)
4149a3437870SNishanth Aravamudan {
4150a3437870SNishanth Aravamudan 	unsigned long *mhp;
41518faa8b07SAndi Kleen 	static unsigned long *last_mhp;
4152b5389086SZhenguo Yao 	int node = NUMA_NO_NODE;
4153b5389086SZhenguo Yao 	int count;
4154b5389086SZhenguo Yao 	unsigned long tmp;
4155b5389086SZhenguo Yao 	char *p = s;
4156a3437870SNishanth Aravamudan 
41579fee021dSVaishali Thakkar 	if (!parsed_valid_hugepagesz) {
4158282f4214SMike Kravetz 		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
41599fee021dSVaishali Thakkar 		parsed_valid_hugepagesz = true;
4160f81f6e4bSPeng Liu 		return 1;
41619fee021dSVaishali Thakkar 	}
4162282f4214SMike Kravetz 
4163a3437870SNishanth Aravamudan 	/*
4164282f4214SMike Kravetz 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4165282f4214SMike Kravetz 	 * yet, so this hugepages= parameter goes to the "default hstate".
4166282f4214SMike Kravetz 	 * Otherwise, it goes with the previously parsed hugepagesz or
4167282f4214SMike Kravetz 	 * default_hugepagesz.
4168a3437870SNishanth Aravamudan 	 */
41699fee021dSVaishali Thakkar 	else if (!hugetlb_max_hstate)
4170a3437870SNishanth Aravamudan 		mhp = &default_hstate_max_huge_pages;
4171a3437870SNishanth Aravamudan 	else
4172a3437870SNishanth Aravamudan 		mhp = &parsed_hstate->max_huge_pages;
4173a3437870SNishanth Aravamudan 
41748faa8b07SAndi Kleen 	if (mhp == last_mhp) {
4175282f4214SMike Kravetz 		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4176f81f6e4bSPeng Liu 		return 1;
41778faa8b07SAndi Kleen 	}
41788faa8b07SAndi Kleen 
4179b5389086SZhenguo Yao 	while (*p) {
4180b5389086SZhenguo Yao 		count = 0;
4181b5389086SZhenguo Yao 		if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4182b5389086SZhenguo Yao 			goto invalid;
4183b5389086SZhenguo Yao 		/* Parameter is node format */
4184b5389086SZhenguo Yao 		if (p[count] == ':') {
4185b5389086SZhenguo Yao 			if (!hugetlb_node_alloc_supported()) {
4186b5389086SZhenguo Yao 				pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4187f81f6e4bSPeng Liu 				return 1;
4188b5389086SZhenguo Yao 			}
41890a7a0f6fSPeng Liu 			if (tmp >= MAX_NUMNODES || !node_online(tmp))
4190e79ce983SLiu Yuntao 				goto invalid;
41910a7a0f6fSPeng Liu 			node = array_index_nospec(tmp, MAX_NUMNODES);
4192b5389086SZhenguo Yao 			p += count + 1;
4193b5389086SZhenguo Yao 			/* Parse hugepages */
4194b5389086SZhenguo Yao 			if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4195b5389086SZhenguo Yao 				goto invalid;
4196b5389086SZhenguo Yao 			if (!hugetlb_max_hstate)
4197b5389086SZhenguo Yao 				default_hugepages_in_node[node] = tmp;
4198b5389086SZhenguo Yao 			else
4199b5389086SZhenguo Yao 				parsed_hstate->max_huge_pages_node[node] = tmp;
4200b5389086SZhenguo Yao 			*mhp += tmp;
4201b5389086SZhenguo Yao 			/* Go on to parse the next node */
4202b5389086SZhenguo Yao 			if (p[count] == ',')
4203b5389086SZhenguo Yao 				p += count + 1;
4204b5389086SZhenguo Yao 			else
4205b5389086SZhenguo Yao 				break;
4206b5389086SZhenguo Yao 		} else {
4207b5389086SZhenguo Yao 			if (p != s)
4208b5389086SZhenguo Yao 				goto invalid;
4209b5389086SZhenguo Yao 			*mhp = tmp;
4210b5389086SZhenguo Yao 			break;
4211b5389086SZhenguo Yao 		}
4212b5389086SZhenguo Yao 	}
4213a3437870SNishanth Aravamudan 
42148faa8b07SAndi Kleen 	/*
42158faa8b07SAndi Kleen 	 * Global state is always initialized later in hugetlb_init.
421604adbc3fSMiaohe Lin 	 * But we need to allocate gigantic hstates here early to still
42178faa8b07SAndi Kleen 	 * use the bootmem allocator.
42188faa8b07SAndi Kleen 	 */
421904adbc3fSMiaohe Lin 	if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
42208faa8b07SAndi Kleen 		hugetlb_hstate_alloc_pages(parsed_hstate);
42218faa8b07SAndi Kleen 
42228faa8b07SAndi Kleen 	last_mhp = mhp;
42238faa8b07SAndi Kleen 
4224a3437870SNishanth Aravamudan 	return 1;
4225b5389086SZhenguo Yao 
4226b5389086SZhenguo Yao invalid:
4227b5389086SZhenguo Yao 	pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4228f87442f4SPeng Liu 	hugepages_clear_pages_in_node();
4229f81f6e4bSPeng Liu 	return 1;
4230a3437870SNishanth Aravamudan }
4231282f4214SMike Kravetz __setup("hugepages=", hugepages_setup);
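/*
 * Illustrative command lines accepted by the parser above (the sizes shown
 * are typical x86-64 values and are examples only):
 *
 *   hugepagesz=2M hugepages=512          512 pages for the 2M hstate
 *   hugepagesz=1G hugepages=0:2,1:2      2 pages each on nodes 0 and 1
 *
 * In the per-node form the counts land in max_huge_pages_node[] and are
 * summed into max_huge_pages.
 */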
4232e11bfbfcSNick Piggin 
4233282f4214SMike Kravetz /*
4234282f4214SMike Kravetz  * hugepagesz command line processing
4235282f4214SMike Kravetz  * A specific huge page size can only be specified once with hugepagesz.
4236282f4214SMike Kravetz  * hugepagesz is followed by hugepages on the command line.  The global
4237282f4214SMike Kravetz  * variable 'parsed_valid_hugepagesz' is used to determine if prior
4238282f4214SMike Kravetz  * hugepagesz argument was valid.
4239282f4214SMike Kravetz  */
4240359f2544SMike Kravetz static int __init hugepagesz_setup(char *s)
4241e11bfbfcSNick Piggin {
4242359f2544SMike Kravetz 	unsigned long size;
4243282f4214SMike Kravetz 	struct hstate *h;
4244282f4214SMike Kravetz 
4245282f4214SMike Kravetz 	parsed_valid_hugepagesz = false;
4246359f2544SMike Kravetz 	size = (unsigned long)memparse(s, NULL);
4247359f2544SMike Kravetz 
4248359f2544SMike Kravetz 	if (!arch_hugetlb_valid_size(size)) {
4249282f4214SMike Kravetz 		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4250f81f6e4bSPeng Liu 		return 1;
4251359f2544SMike Kravetz 	}
4252359f2544SMike Kravetz 
4253282f4214SMike Kravetz 	h = size_to_hstate(size);
4254282f4214SMike Kravetz 	if (h) {
4255282f4214SMike Kravetz 		/*
4256282f4214SMike Kravetz 		 * hstate for this size already exists.  This is normally
4257282f4214SMike Kravetz 		 * an error, but is allowed if the existing hstate is the
4258282f4214SMike Kravetz 		 * default hstate.  More specifically, it is only allowed if
4259282f4214SMike Kravetz 		 * the number of huge pages for the default hstate was not
4260282f4214SMike Kravetz 		 * previously specified.
4261282f4214SMike Kravetz 		 */
4262282f4214SMike Kravetz 		if (!parsed_default_hugepagesz ||  h != &default_hstate ||
4263282f4214SMike Kravetz 		    default_hstate.max_huge_pages) {
4264282f4214SMike Kravetz 			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4265f81f6e4bSPeng Liu 			return 1;
426638237830SMike Kravetz 		}
426738237830SMike Kravetz 
4268282f4214SMike Kravetz 		/*
4269282f4214SMike Kravetz 		 * No need to call hugetlb_add_hstate() as hstate already
4270282f4214SMike Kravetz 		 * exists.  But, do set parsed_hstate so that a following
4271282f4214SMike Kravetz 		 * hugepages= parameter will be applied to this hstate.
4272282f4214SMike Kravetz 		 */
4273282f4214SMike Kravetz 		parsed_hstate = h;
4274282f4214SMike Kravetz 		parsed_valid_hugepagesz = true;
4275e11bfbfcSNick Piggin 		return 1;
4276e11bfbfcSNick Piggin 	}
4277282f4214SMike Kravetz 
4278359f2544SMike Kravetz 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4279282f4214SMike Kravetz 	parsed_valid_hugepagesz = true;
4280359f2544SMike Kravetz 	return 1;
4281359f2544SMike Kravetz }
4282359f2544SMike Kravetz __setup("hugepagesz=", hugepagesz_setup);
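/*
 * Several size/count pairs may be given, e.g. (sizes illustrative):
 *
 *   hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=256
 *
 * Each hugepagesz= selects, via parsed_hstate, the hstate that the
 * following hugepages= applies to.
 */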
4283359f2544SMike Kravetz 
4284282f4214SMike Kravetz /*
4285282f4214SMike Kravetz  * default_hugepagesz command line input
4286282f4214SMike Kravetz  * Only one instance of default_hugepagesz allowed on command line.
4287282f4214SMike Kravetz  */
4288ae94da89SMike Kravetz static int __init default_hugepagesz_setup(char *s)
4289e11bfbfcSNick Piggin {
4290ae94da89SMike Kravetz 	unsigned long size;
4291b5389086SZhenguo Yao 	int i;
4292ae94da89SMike Kravetz 
4293282f4214SMike Kravetz 	parsed_valid_hugepagesz = false;
4294282f4214SMike Kravetz 	if (parsed_default_hugepagesz) {
4295282f4214SMike Kravetz 		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4296f81f6e4bSPeng Liu 		return 1;
4297282f4214SMike Kravetz 	}
4298282f4214SMike Kravetz 
4299282f4214SMike Kravetz 	size = (unsigned long)memparse(s, NULL);
4300282f4214SMike Kravetz 
4301282f4214SMike Kravetz 	if (!arch_hugetlb_valid_size(size)) {
4302282f4214SMike Kravetz 		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4303f81f6e4bSPeng Liu 		return 1;
4304282f4214SMike Kravetz 	}
4305282f4214SMike Kravetz 
4306282f4214SMike Kravetz 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4307282f4214SMike Kravetz 	parsed_valid_hugepagesz = true;
4308282f4214SMike Kravetz 	parsed_default_hugepagesz = true;
4309282f4214SMike Kravetz 	default_hstate_idx = hstate_index(size_to_hstate(size));
4310282f4214SMike Kravetz 
4311282f4214SMike Kravetz 	/*
4312282f4214SMike Kravetz 	 * The number of default huge pages (for this size) could have been
4313282f4214SMike Kravetz 	 * specified as the first hugetlb parameter: hugepages=X.  If so,
4314282f4214SMike Kravetz 	 * then default_hstate_max_huge_pages is set.  If the default huge
4315282f4214SMike Kravetz 	 * page size is gigantic (>= MAX_ORDER), then the pages must be
4316282f4214SMike Kravetz 	 * allocated here from the bootmem allocator.
4317282f4214SMike Kravetz 	 */
4318282f4214SMike Kravetz 	if (default_hstate_max_huge_pages) {
4319282f4214SMike Kravetz 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
43200a7a0f6fSPeng Liu 		for_each_online_node(i)
4321b5389086SZhenguo Yao 			default_hstate.max_huge_pages_node[i] =
4322b5389086SZhenguo Yao 				default_hugepages_in_node[i];
4323282f4214SMike Kravetz 		if (hstate_is_gigantic(&default_hstate))
4324282f4214SMike Kravetz 			hugetlb_hstate_alloc_pages(&default_hstate);
4325282f4214SMike Kravetz 		default_hstate_max_huge_pages = 0;
4326282f4214SMike Kravetz 	}
4327282f4214SMike Kravetz 
4328e11bfbfcSNick Piggin 	return 1;
4329e11bfbfcSNick Piggin }
4330ae94da89SMike Kravetz __setup("default_hugepagesz=", default_hugepagesz_setup);
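/*
 * Example (illustrative): "hugepages=16 default_hugepagesz=1G" is accepted
 * because a leading hugepages= count is stashed in
 * default_hstate_max_huge_pages and applied above once the default size is
 * known.
 */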
4331a3437870SNishanth Aravamudan 
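/*
 * Count the free huge pages of @h on the nodes the current task is allowed
 * to use, honouring both its cpuset and its memory policy.
 */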
43328ca39e68SMuchun Song static unsigned int allowed_mems_nr(struct hstate *h)
43338a213460SNishanth Aravamudan {
43348a213460SNishanth Aravamudan 	int node;
43358a213460SNishanth Aravamudan 	unsigned int nr = 0;
43368ca39e68SMuchun Song 	nodemask_t *mpol_allowed;
43378ca39e68SMuchun Song 	unsigned int *array = h->free_huge_pages_node;
43388ca39e68SMuchun Song 	gfp_t gfp_mask = htlb_alloc_mask(h);
43398a213460SNishanth Aravamudan 
43408ca39e68SMuchun Song 	mpol_allowed = policy_nodemask_current(gfp_mask);
43418ca39e68SMuchun Song 
43428ca39e68SMuchun Song 	for_each_node_mask(node, cpuset_current_mems_allowed) {
4343c93b0a99SJiapeng Zhong 		if (!mpol_allowed || node_isset(node, *mpol_allowed))
43448a213460SNishanth Aravamudan 			nr += array[node];
43458ca39e68SMuchun Song 	}
43468a213460SNishanth Aravamudan 
43478a213460SNishanth Aravamudan 	return nr;
43488a213460SNishanth Aravamudan }
43498a213460SNishanth Aravamudan 
43508a213460SNishanth Aravamudan #ifdef CONFIG_SYSCTL
435117743798SMuchun Song static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
435217743798SMuchun Song 					  void *buffer, size_t *length,
435317743798SMuchun Song 					  loff_t *ppos, unsigned long *out)
435417743798SMuchun Song {
435517743798SMuchun Song 	struct ctl_table dup_table;
435617743798SMuchun Song 
435717743798SMuchun Song 	/*
435817743798SMuchun Song 	 * In order to avoid races with __do_proc_doulongvec_minmax(), we
435917743798SMuchun Song 	 * duplicate @table and modify only the duplicate.
436017743798SMuchun Song 	 */
436117743798SMuchun Song 	dup_table = *table;
436217743798SMuchun Song 	dup_table.data = out;
436317743798SMuchun Song 
436417743798SMuchun Song 	return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
436517743798SMuchun Song }
436617743798SMuchun Song 
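/*
 * Common helper behind the nr_hugepages style sysctls: read the current
 * count into a local copy and, on write, resize the default hstate via
 * __nr_hugepages_store_common() (optionally obeying the task's mempolicy).
 */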
436706808b08SLee Schermerhorn static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
436806808b08SLee Schermerhorn 			 struct ctl_table *table, int write,
436932927393SChristoph Hellwig 			 void *buffer, size_t *length, loff_t *ppos)
43701da177e4SLinus Torvalds {
4371e5ff2159SAndi Kleen 	struct hstate *h = &default_hstate;
4372238d3c13SDavid Rientjes 	unsigned long tmp = h->max_huge_pages;
437308d4a246SMichal Hocko 	int ret;
4374e5ff2159SAndi Kleen 
4375457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
437686613628SJan Stancek 		return -EOPNOTSUPP;
4377457c1b27SNishanth Aravamudan 
437817743798SMuchun Song 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
437917743798SMuchun Song 					     &tmp);
438008d4a246SMichal Hocko 	if (ret)
438108d4a246SMichal Hocko 		goto out;
4382e5ff2159SAndi Kleen 
4383238d3c13SDavid Rientjes 	if (write)
4384238d3c13SDavid Rientjes 		ret = __nr_hugepages_store_common(obey_mempolicy, h,
4385238d3c13SDavid Rientjes 						  NUMA_NO_NODE, tmp, *length);
438608d4a246SMichal Hocko out:
438708d4a246SMichal Hocko 	return ret;
43881da177e4SLinus Torvalds }
4389396faf03SMel Gorman 
439006808b08SLee Schermerhorn int hugetlb_sysctl_handler(struct ctl_table *table, int write,
439132927393SChristoph Hellwig 			  void *buffer, size_t *length, loff_t *ppos)
439206808b08SLee Schermerhorn {
439306808b08SLee Schermerhorn 
439406808b08SLee Schermerhorn 	return hugetlb_sysctl_handler_common(false, table, write,
439506808b08SLee Schermerhorn 							buffer, length, ppos);
439606808b08SLee Schermerhorn }
439706808b08SLee Schermerhorn 
439806808b08SLee Schermerhorn #ifdef CONFIG_NUMA
439906808b08SLee Schermerhorn int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
440032927393SChristoph Hellwig 			  void *buffer, size_t *length, loff_t *ppos)
440106808b08SLee Schermerhorn {
440206808b08SLee Schermerhorn 	return hugetlb_sysctl_handler_common(true, table, write,
440306808b08SLee Schermerhorn 							buffer, length, ppos);
440406808b08SLee Schermerhorn }
440506808b08SLee Schermerhorn #endif /* CONFIG_NUMA */
440606808b08SLee Schermerhorn 
4407a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write,
440832927393SChristoph Hellwig 		void *buffer, size_t *length, loff_t *ppos)
4409a3d0c6aaSNishanth Aravamudan {
4410a5516438SAndi Kleen 	struct hstate *h = &default_hstate;
4411e5ff2159SAndi Kleen 	unsigned long tmp;
441208d4a246SMichal Hocko 	int ret;
4413e5ff2159SAndi Kleen 
4414457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
441586613628SJan Stancek 		return -EOPNOTSUPP;
4416457c1b27SNishanth Aravamudan 
4417e5ff2159SAndi Kleen 	tmp = h->nr_overcommit_huge_pages;
4418e5ff2159SAndi Kleen 
4419bae7f4aeSLuiz Capitulino 	if (write && hstate_is_gigantic(h))
4420adbe8726SEric B Munson 		return -EINVAL;
4421adbe8726SEric B Munson 
442217743798SMuchun Song 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
442317743798SMuchun Song 					     &tmp);
442408d4a246SMichal Hocko 	if (ret)
442508d4a246SMichal Hocko 		goto out;
4426e5ff2159SAndi Kleen 
4427e5ff2159SAndi Kleen 	if (write) {
4428db71ef79SMike Kravetz 		spin_lock_irq(&hugetlb_lock);
4429e5ff2159SAndi Kleen 		h->nr_overcommit_huge_pages = tmp;
4430db71ef79SMike Kravetz 		spin_unlock_irq(&hugetlb_lock);
4431e5ff2159SAndi Kleen 	}
443208d4a246SMichal Hocko out:
443308d4a246SMichal Hocko 	return ret;
4434a3d0c6aaSNishanth Aravamudan }
4435a3d0c6aaSNishanth Aravamudan 
44361da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */
44371da177e4SLinus Torvalds 
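/*
 * Emits the hugetlb section of /proc/meminfo.  Only the default hstate gets
 * the detailed counters; "Hugetlb:" sums the memory of every hstate.
 * Illustrative output (values depend entirely on configuration):
 *
 *   HugePages_Total:       8
 *   HugePages_Free:        8
 *   HugePages_Rsvd:        0
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 *   Hugetlb:           16384 kB
 */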
4438e1759c21SAlexey Dobriyan void hugetlb_report_meminfo(struct seq_file *m)
44391da177e4SLinus Torvalds {
4440fcb2b0c5SRoman Gushchin 	struct hstate *h;
4441fcb2b0c5SRoman Gushchin 	unsigned long total = 0;
4442fcb2b0c5SRoman Gushchin 
4443457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
4444457c1b27SNishanth Aravamudan 		return;
4445fcb2b0c5SRoman Gushchin 
4446fcb2b0c5SRoman Gushchin 	for_each_hstate(h) {
4447fcb2b0c5SRoman Gushchin 		unsigned long count = h->nr_huge_pages;
4448fcb2b0c5SRoman Gushchin 
4449aca78307SMiaohe Lin 		total += huge_page_size(h) * count;
4450fcb2b0c5SRoman Gushchin 
4451fcb2b0c5SRoman Gushchin 		if (h == &default_hstate)
4452e1759c21SAlexey Dobriyan 			seq_printf(m,
44531da177e4SLinus Torvalds 				   "HugePages_Total:   %5lu\n"
44541da177e4SLinus Torvalds 				   "HugePages_Free:    %5lu\n"
4455b45b5bd6SDavid Gibson 				   "HugePages_Rsvd:    %5lu\n"
44567893d1d5SAdam Litke 				   "HugePages_Surp:    %5lu\n"
44574f98a2feSRik van Riel 				   "Hugepagesize:   %8lu kB\n",
4458fcb2b0c5SRoman Gushchin 				   count,
4459a5516438SAndi Kleen 				   h->free_huge_pages,
4460a5516438SAndi Kleen 				   h->resv_huge_pages,
4461a5516438SAndi Kleen 				   h->surplus_huge_pages,
4462aca78307SMiaohe Lin 				   huge_page_size(h) / SZ_1K);
4463fcb2b0c5SRoman Gushchin 	}
4464fcb2b0c5SRoman Gushchin 
4465aca78307SMiaohe Lin 	seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
44661da177e4SLinus Torvalds }
44671da177e4SLinus Torvalds 
44687981593bSJoe Perches int hugetlb_report_node_meminfo(char *buf, int len, int nid)
44691da177e4SLinus Torvalds {
4470a5516438SAndi Kleen 	struct hstate *h = &default_hstate;
44717981593bSJoe Perches 
4472457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
4473457c1b27SNishanth Aravamudan 		return 0;
44747981593bSJoe Perches 
44757981593bSJoe Perches 	return sysfs_emit_at(buf, len,
44761da177e4SLinus Torvalds 			     "Node %d HugePages_Total: %5u\n"
4477a1de0919SNishanth Aravamudan 			     "Node %d HugePages_Free:  %5u\n"
4478a1de0919SNishanth Aravamudan 			     "Node %d HugePages_Surp:  %5u\n",
4479a5516438SAndi Kleen 			     nid, h->nr_huge_pages_node[nid],
4480a5516438SAndi Kleen 			     nid, h->free_huge_pages_node[nid],
4481a5516438SAndi Kleen 			     nid, h->surplus_huge_pages_node[nid]);
44821da177e4SLinus Torvalds }
44831da177e4SLinus Torvalds 
4484949f7ec5SDavid Rientjes void hugetlb_show_meminfo(void)
4485949f7ec5SDavid Rientjes {
4486949f7ec5SDavid Rientjes 	struct hstate *h;
4487949f7ec5SDavid Rientjes 	int nid;
4488949f7ec5SDavid Rientjes 
4489457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
4490457c1b27SNishanth Aravamudan 		return;
4491457c1b27SNishanth Aravamudan 
4492949f7ec5SDavid Rientjes 	for_each_node_state(nid, N_MEMORY)
4493949f7ec5SDavid Rientjes 		for_each_hstate(h)
4494949f7ec5SDavid Rientjes 			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4495949f7ec5SDavid Rientjes 				nid,
4496949f7ec5SDavid Rientjes 				h->nr_huge_pages_node[nid],
4497949f7ec5SDavid Rientjes 				h->free_huge_pages_node[nid],
4498949f7ec5SDavid Rientjes 				h->surplus_huge_pages_node[nid],
4499aca78307SMiaohe Lin 				huge_page_size(h) / SZ_1K);
4500949f7ec5SDavid Rientjes }
4501949f7ec5SDavid Rientjes 
45025d317b2bSNaoya Horiguchi void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
45035d317b2bSNaoya Horiguchi {
45045d317b2bSNaoya Horiguchi 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
45055d317b2bSNaoya Horiguchi 		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
45065d317b2bSNaoya Horiguchi }
45075d317b2bSNaoya Horiguchi 
45091da177e4SLinus Torvalds /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
45091da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void)
45101da177e4SLinus Torvalds {
4511d0028588SWanpeng Li 	struct hstate *h;
4512d0028588SWanpeng Li 	unsigned long nr_total_pages = 0;
4513d0028588SWanpeng Li 
4514d0028588SWanpeng Li 	for_each_hstate(h)
4515d0028588SWanpeng Li 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4516d0028588SWanpeng Li 	return nr_total_pages;
45171da177e4SLinus Torvalds }
45181da177e4SLinus Torvalds 
4519a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta)
4520fc1b8a73SMel Gorman {
4521fc1b8a73SMel Gorman 	int ret = -ENOMEM;
4522fc1b8a73SMel Gorman 
45230aa7f354SMiaohe Lin 	if (!delta)
45240aa7f354SMiaohe Lin 		return 0;
45250aa7f354SMiaohe Lin 
4526db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
4527fc1b8a73SMel Gorman 	/*
4528fc1b8a73SMel Gorman 	 * When cpuset is configured, it breaks the strict hugetlb page
4529fc1b8a73SMel Gorman 	 * reservation as the accounting is done on a global variable. Such
4530fc1b8a73SMel Gorman 	 * reservation is completely rubbish in the presence of cpuset because
4531fc1b8a73SMel Gorman 	 * the reservation is not checked against page availability for the
4532fc1b8a73SMel Gorman 	 * current cpuset. An application can still be OOM'ed by the kernel
4533fc1b8a73SMel Gorman 	 * due to a lack of free htlb pages in the cpuset that the task is in.
4534fc1b8a73SMel Gorman 	 * Attempting to enforce strict accounting with cpuset is almost
4535fc1b8a73SMel Gorman 	 * impossible (or too ugly) because cpuset is so fluid that a
4536fc1b8a73SMel Gorman 	 * task or memory node can be dynamically moved between cpusets.
4537fc1b8a73SMel Gorman 	 *
4538fc1b8a73SMel Gorman 	 * The change of semantics for shared hugetlb mapping with cpuset is
4539fc1b8a73SMel Gorman 	 * undesirable. However, in order to preserve some of the semantics,
4540fc1b8a73SMel Gorman 	 * we fall back to check against current free page availability as
4541fc1b8a73SMel Gorman 	 * a best attempt and hopefully to minimize the impact of changing
4542fc1b8a73SMel Gorman 	 * semantics that cpuset has.
45438ca39e68SMuchun Song 	 *
45448ca39e68SMuchun Song 	 * Apart from cpuset, we also have memory policy mechanism that
45458ca39e68SMuchun Song 	 * also determines from which node the kernel will allocate memory
45468ca39e68SMuchun Song 	 * in a NUMA system. So similar to cpuset, we also should consider
45478ca39e68SMuchun Song 	 * the memory policy of the current task. Similar to the description
45488ca39e68SMuchun Song 	 * above.
4549fc1b8a73SMel Gorman 	 */
4550fc1b8a73SMel Gorman 	if (delta > 0) {
4551a5516438SAndi Kleen 		if (gather_surplus_pages(h, delta) < 0)
4552fc1b8a73SMel Gorman 			goto out;
4553fc1b8a73SMel Gorman 
45548ca39e68SMuchun Song 		if (delta > allowed_mems_nr(h)) {
4555a5516438SAndi Kleen 			return_unused_surplus_pages(h, delta);
4556fc1b8a73SMel Gorman 			goto out;
4557fc1b8a73SMel Gorman 		}
4558fc1b8a73SMel Gorman 	}
4559fc1b8a73SMel Gorman 
4560fc1b8a73SMel Gorman 	ret = 0;
4561fc1b8a73SMel Gorman 	if (delta < 0)
4562a5516438SAndi Kleen 		return_unused_surplus_pages(h, (unsigned long) -delta);
4563fc1b8a73SMel Gorman 
4564fc1b8a73SMel Gorman out:
4565db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
4566fc1b8a73SMel Gorman 	return ret;
4567fc1b8a73SMel Gorman }
4568fc1b8a73SMel Gorman 
456984afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma)
457084afd99bSAndy Whitcroft {
4571f522c3acSJoonsoo Kim 	struct resv_map *resv = vma_resv_map(vma);
457284afd99bSAndy Whitcroft 
457384afd99bSAndy Whitcroft 	/*
457484afd99bSAndy Whitcroft 	 * This new VMA should share its sibling's reservation map if present.
457584afd99bSAndy Whitcroft 	 * The VMA will only ever have a valid reservation map pointer where
457684afd99bSAndy Whitcroft 	 * it is being copied for another still existing VMA.  As that VMA
457725985edcSLucas De Marchi 	 * has a reference to the reservation map it cannot disappear until
457884afd99bSAndy Whitcroft 	 * after this open call completes.  It is therefore safe to take a
457984afd99bSAndy Whitcroft 	 * new reference here without additional locking.
458084afd99bSAndy Whitcroft 	 */
458109a26e83SMike Kravetz 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
458209a26e83SMike Kravetz 		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4583f522c3acSJoonsoo Kim 		kref_get(&resv->refs);
458484afd99bSAndy Whitcroft 	}
458509a26e83SMike Kravetz }
458684afd99bSAndy Whitcroft 
4587a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma)
4588a1e78772SMel Gorman {
4589a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
4590f522c3acSJoonsoo Kim 	struct resv_map *resv = vma_resv_map(vma);
459190481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_vma(vma);
45924e35f483SJoonsoo Kim 	unsigned long reserve, start, end;
45931c5ecae3SMike Kravetz 	long gbl_reserve;
459484afd99bSAndy Whitcroft 
45954e35f483SJoonsoo Kim 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
45964e35f483SJoonsoo Kim 		return;
45974e35f483SJoonsoo Kim 
4598a5516438SAndi Kleen 	start = vma_hugecache_offset(h, vma, vma->vm_start);
4599a5516438SAndi Kleen 	end = vma_hugecache_offset(h, vma, vma->vm_end);
460084afd99bSAndy Whitcroft 
46014e35f483SJoonsoo Kim 	reserve = (end - start) - region_count(resv, start, end);
4602e9fe92aeSMina Almasry 	hugetlb_cgroup_uncharge_counter(resv, start, end);
46037251ff78SAdam Litke 	if (reserve) {
46041c5ecae3SMike Kravetz 		/*
46051c5ecae3SMike Kravetz 		 * Decrement reserve counts.  The global reserve count may be
46061c5ecae3SMike Kravetz 		 * adjusted if the subpool has a minimum size.
46071c5ecae3SMike Kravetz 		 */
46081c5ecae3SMike Kravetz 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
46091c5ecae3SMike Kravetz 		hugetlb_acct_memory(h, -gbl_reserve);
46107251ff78SAdam Litke 	}
4611e9fe92aeSMina Almasry 
4612e9fe92aeSMina Almasry 	kref_put(&resv->refs, resv_map_release);
4613a1e78772SMel Gorman }
4614a1e78772SMel Gorman 
461531383c68SDan Williams static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
461631383c68SDan Williams {
461731383c68SDan Williams 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
461831383c68SDan Williams 		return -EINVAL;
461931383c68SDan Williams 	return 0;
462031383c68SDan Williams }
462131383c68SDan Williams 
462205ea8860SDan Williams static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
462305ea8860SDan Williams {
4624aca78307SMiaohe Lin 	return huge_page_size(hstate_vma(vma));
462505ea8860SDan Williams }
462605ea8860SDan Williams 
46271da177e4SLinus Torvalds /*
46281da177e4SLinus Torvalds  * We cannot handle pagefaults against hugetlb pages at all.  They cause
46291da177e4SLinus Torvalds  * handle_mm_fault() to try to instantiate regular-sized pages in the
46306c26d310SMiaohe Lin  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
46311da177e4SLinus Torvalds  * this far.
46321da177e4SLinus Torvalds  */
4633b3ec9f33SSouptick Joarder static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
46341da177e4SLinus Torvalds {
46351da177e4SLinus Torvalds 	BUG();
4636d0217ac0SNick Piggin 	return 0;
46371da177e4SLinus Torvalds }
46381da177e4SLinus Torvalds 
4639eec3636aSJane Chu /*
4640eec3636aSJane Chu  * When a new function is introduced to vm_operations_struct and added
4641eec3636aSJane Chu  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4642eec3636aSJane Chu  * This is because under System V memory model, mappings created via
4643eec3636aSJane Chu  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
4644eec3636aSJane Chu  * their original vm_ops are overwritten with shm_vm_ops.
4645eec3636aSJane Chu  */
4646f0f37e2fSAlexey Dobriyan const struct vm_operations_struct hugetlb_vm_ops = {
4647d0217ac0SNick Piggin 	.fault = hugetlb_vm_op_fault,
464884afd99bSAndy Whitcroft 	.open = hugetlb_vm_op_open,
4649a1e78772SMel Gorman 	.close = hugetlb_vm_op_close,
4650dd3b614fSDmitry Safonov 	.may_split = hugetlb_vm_op_split,
465105ea8860SDan Williams 	.pagesize = hugetlb_vm_op_pagesize,
46521da177e4SLinus Torvalds };
46531da177e4SLinus Torvalds 
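/*
 * Build the huge PTE for @page under @vma's protection: writable mappings
 * get a writable+dirty entry, others a write-protected one; the entry is
 * marked young and passed through arch_make_huge_pte() for per-arch fixups.
 */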
46541e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
46551e8f889bSDavid Gibson 				int writable)
465663551ae0SDavid Gibson {
465763551ae0SDavid Gibson 	pte_t entry;
465879c1c594SChristophe Leroy 	unsigned int shift = huge_page_shift(hstate_vma(vma));
465963551ae0SDavid Gibson 
46601e8f889bSDavid Gibson 	if (writable) {
4661106c992aSGerald Schaefer 		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
4662106c992aSGerald Schaefer 					 vma->vm_page_prot)));
466363551ae0SDavid Gibson 	} else {
4664106c992aSGerald Schaefer 		entry = huge_pte_wrprotect(mk_huge_pte(page,
4665106c992aSGerald Schaefer 					   vma->vm_page_prot));
466663551ae0SDavid Gibson 	}
466763551ae0SDavid Gibson 	entry = pte_mkyoung(entry);
466879c1c594SChristophe Leroy 	entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
466963551ae0SDavid Gibson 
467063551ae0SDavid Gibson 	return entry;
467163551ae0SDavid Gibson }
467263551ae0SDavid Gibson 
46731e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma,
46741e8f889bSDavid Gibson 				   unsigned long address, pte_t *ptep)
46751e8f889bSDavid Gibson {
46761e8f889bSDavid Gibson 	pte_t entry;
46771e8f889bSDavid Gibson 
4678106c992aSGerald Schaefer 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
467932f84528SChris Forbes 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
46804b3073e1SRussell King 		update_mmu_cache(vma, address, ptep);
46811e8f889bSDavid Gibson }
46821e8f889bSDavid Gibson 
4683d5ed7444SAneesh Kumar K.V bool is_hugetlb_entry_migration(pte_t pte)
46844a705fefSNaoya Horiguchi {
46854a705fefSNaoya Horiguchi 	swp_entry_t swp;
46864a705fefSNaoya Horiguchi 
46874a705fefSNaoya Horiguchi 	if (huge_pte_none(pte) || pte_present(pte))
4688d5ed7444SAneesh Kumar K.V 		return false;
46894a705fefSNaoya Horiguchi 	swp = pte_to_swp_entry(pte);
4690d79d176aSBaoquan He 	if (is_migration_entry(swp))
4691d5ed7444SAneesh Kumar K.V 		return true;
46924a705fefSNaoya Horiguchi 	else
4693d5ed7444SAneesh Kumar K.V 		return false;
46944a705fefSNaoya Horiguchi }
46954a705fefSNaoya Horiguchi 
46963e5c3600SBaoquan He static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
46974a705fefSNaoya Horiguchi {
46984a705fefSNaoya Horiguchi 	swp_entry_t swp;
46994a705fefSNaoya Horiguchi 
47004a705fefSNaoya Horiguchi 	if (huge_pte_none(pte) || pte_present(pte))
47013e5c3600SBaoquan He 		return false;
47024a705fefSNaoya Horiguchi 	swp = pte_to_swp_entry(pte);
4703d79d176aSBaoquan He 	if (is_hwpoison_entry(swp))
47043e5c3600SBaoquan He 		return true;
47054a705fefSNaoya Horiguchi 	else
47063e5c3600SBaoquan He 		return false;
47074a705fefSNaoya Horiguchi }
47081e8f889bSDavid Gibson 
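/*
 * Install a freshly copied huge page into the child's page table during
 * fork: add the anon rmap, set a writable PTE, bump the hugetlb counter and
 * mark the page migratable.
 */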
47094eae4efaSPeter Xu static void
47104eae4efaSPeter Xu hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
47114eae4efaSPeter Xu 		     struct page *new_page)
47124eae4efaSPeter Xu {
47134eae4efaSPeter Xu 	__SetPageUptodate(new_page);
47144eae4efaSPeter Xu 	hugepage_add_new_anon_rmap(new_page, vma, addr);
47151eba86c0SPasha Tatashin 	set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
47164eae4efaSPeter Xu 	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
47174eae4efaSPeter Xu 	ClearHPageRestoreReserve(new_page);
47184eae4efaSPeter Xu 	SetHPageMigratable(new_page);
47194eae4efaSPeter Xu }
47204eae4efaSPeter Xu 
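/*
 * Copy the huge page table entries from @src to @dst at fork time.  For
 * shared mappings nothing is copied when the page tables themselves are
 * shared; for private (COW) mappings the parent's entries are write
 * protected, and pages whose anon rmap cannot be duplicated (e.g. pinned
 * pages) are copied into a freshly allocated huge page right away.
 */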
472163551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
472263551ae0SDavid Gibson 			    struct vm_area_struct *vma)
472363551ae0SDavid Gibson {
47245e41540cSMike Kravetz 	pte_t *src_pte, *dst_pte, entry, dst_entry;
472563551ae0SDavid Gibson 	struct page *ptepage;
47261c59827dSHugh Dickins 	unsigned long addr;
4727ca6eb14dSPeter Xu 	bool cow = is_cow_mapping(vma->vm_flags);
4728a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
4729a5516438SAndi Kleen 	unsigned long sz = huge_page_size(h);
47304eae4efaSPeter Xu 	unsigned long npages = pages_per_huge_page(h);
4731c0d0381aSMike Kravetz 	struct address_space *mapping = vma->vm_file->f_mapping;
4732ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
4733e8569dd2SAndreas Sandberg 	int ret = 0;
47341e8f889bSDavid Gibson 
4735ac46d4f3SJérôme Glisse 	if (cow) {
47367269f999SJérôme Glisse 		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
47376f4f13e8SJérôme Glisse 					vma->vm_start,
4738ac46d4f3SJérôme Glisse 					vma->vm_end);
4739ac46d4f3SJérôme Glisse 		mmu_notifier_invalidate_range_start(&range);
4740623a1ddfSDavid Hildenbrand 		mmap_assert_write_locked(src);
4741623a1ddfSDavid Hildenbrand 		raw_write_seqcount_begin(&src->write_protect_seq);
4742c0d0381aSMike Kravetz 	} else {
4743c0d0381aSMike Kravetz 		/*
4744c0d0381aSMike Kravetz 		 * For shared mappings i_mmap_rwsem must be held to call
4745c0d0381aSMike Kravetz 		 * huge_pte_alloc, otherwise the returned ptep could go
4746c0d0381aSMike Kravetz 		 * away if part of a shared pmd and another thread calls
4747c0d0381aSMike Kravetz 		 * huge_pmd_unshare.
4748c0d0381aSMike Kravetz 		 */
4749c0d0381aSMike Kravetz 		i_mmap_lock_read(mapping);
4750ac46d4f3SJérôme Glisse 	}
4751e8569dd2SAndreas Sandberg 
4752a5516438SAndi Kleen 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
4753cb900f41SKirill A. Shutemov 		spinlock_t *src_ptl, *dst_ptl;
47547868a208SPunit Agrawal 		src_pte = huge_pte_offset(src, addr, sz);
4755c74df32cSHugh Dickins 		if (!src_pte)
4756c74df32cSHugh Dickins 			continue;
4757aec44e0fSPeter Xu 		dst_pte = huge_pte_alloc(dst, vma, addr, sz);
4758e8569dd2SAndreas Sandberg 		if (!dst_pte) {
4759e8569dd2SAndreas Sandberg 			ret = -ENOMEM;
4760e8569dd2SAndreas Sandberg 			break;
4761e8569dd2SAndreas Sandberg 		}
4762c5c99429SLarry Woodman 
47635e41540cSMike Kravetz 		/*
47645e41540cSMike Kravetz 		 * If the pagetables are shared don't copy or take references.
47655e41540cSMike Kravetz 		 * dst_pte == src_pte is the common case of src/dest sharing.
47665e41540cSMike Kravetz 		 *
47675e41540cSMike Kravetz 		 * However, src could have 'unshared' and dst shares with
47685e41540cSMike Kravetz 		 * another vma.  If dst_pte !none, this implies sharing.
47695e41540cSMike Kravetz 		 * Check here before taking page table lock, and once again
47705e41540cSMike Kravetz 		 * after taking the lock below.
47715e41540cSMike Kravetz 		 */
47725e41540cSMike Kravetz 		dst_entry = huge_ptep_get(dst_pte);
47735e41540cSMike Kravetz 		if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
4774c5c99429SLarry Woodman 			continue;
4775c5c99429SLarry Woodman 
4776cb900f41SKirill A. Shutemov 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
4777cb900f41SKirill A. Shutemov 		src_ptl = huge_pte_lockptr(h, src, src_pte);
4778cb900f41SKirill A. Shutemov 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
47794a705fefSNaoya Horiguchi 		entry = huge_ptep_get(src_pte);
47805e41540cSMike Kravetz 		dst_entry = huge_ptep_get(dst_pte);
47814eae4efaSPeter Xu again:
47825e41540cSMike Kravetz 		if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
47835e41540cSMike Kravetz 			/*
47845e41540cSMike Kravetz 			 * Skip if src entry none.  Also, skip in the
47855e41540cSMike Kravetz 			 * unlikely case dst entry !none as this implies
47865e41540cSMike Kravetz 			 * sharing with another vma.
47875e41540cSMike Kravetz 			 */
47884a705fefSNaoya Horiguchi 			;
47894a705fefSNaoya Horiguchi 		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
47904a705fefSNaoya Horiguchi 				    is_hugetlb_entry_hwpoisoned(entry))) {
47914a705fefSNaoya Horiguchi 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
47924a705fefSNaoya Horiguchi 
47936c287605SDavid Hildenbrand 			if (!is_readable_migration_entry(swp_entry) && cow) {
47944a705fefSNaoya Horiguchi 				/*
47954a705fefSNaoya Horiguchi 				 * COW mappings require pages in both
47964a705fefSNaoya Horiguchi 				 * parent and child to be set to read.
47974a705fefSNaoya Horiguchi 				 */
47984dd845b5SAlistair Popple 				swp_entry = make_readable_migration_entry(
47994dd845b5SAlistair Popple 							swp_offset(swp_entry));
48004a705fefSNaoya Horiguchi 				entry = swp_entry_to_pte(swp_entry);
4801e5251fd4SPunit Agrawal 				set_huge_swap_pte_at(src, addr, src_pte,
4802e5251fd4SPunit Agrawal 						     entry, sz);
48034a705fefSNaoya Horiguchi 			}
4804e5251fd4SPunit Agrawal 			set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
48054a705fefSNaoya Horiguchi 		} else {
48064eae4efaSPeter Xu 			entry = huge_ptep_get(src_pte);
48074eae4efaSPeter Xu 			ptepage = pte_page(entry);
48084eae4efaSPeter Xu 			get_page(ptepage);
48094eae4efaSPeter Xu 
48104eae4efaSPeter Xu 			/*
4811fb3d824dSDavid Hildenbrand 			 * Failing to duplicate the anon rmap is a rare case
4812fb3d824dSDavid Hildenbrand 			 * where we see pinned hugetlb pages while they're
4813fb3d824dSDavid Hildenbrand 			 * prone to COW. We need to do the COW earlier during
4814fb3d824dSDavid Hildenbrand 			 * fork.
48154eae4efaSPeter Xu 			 *
48164eae4efaSPeter Xu 			 * When pre-allocating the page or copying data, we
48174eae4efaSPeter Xu 			 * need to be without the pgtable locks since we could
48184eae4efaSPeter Xu 			 * sleep during the process.
48194eae4efaSPeter Xu 			 */
4820fb3d824dSDavid Hildenbrand 			if (!PageAnon(ptepage)) {
4821fb3d824dSDavid Hildenbrand 				page_dup_file_rmap(ptepage, true);
4822fb3d824dSDavid Hildenbrand 			} else if (page_try_dup_anon_rmap(ptepage, true, vma)) {
48234eae4efaSPeter Xu 				pte_t src_pte_old = entry;
48244eae4efaSPeter Xu 				struct page *new;
48254eae4efaSPeter Xu 
48264eae4efaSPeter Xu 				spin_unlock(src_ptl);
48274eae4efaSPeter Xu 				spin_unlock(dst_ptl);
48284eae4efaSPeter Xu 				/* Do not use reserve as it's private owned */
48294eae4efaSPeter Xu 				/* Do not use the reserve as it's privately owned */
48304eae4efaSPeter Xu 				if (IS_ERR(new)) {
48314eae4efaSPeter Xu 					put_page(ptepage);
48324eae4efaSPeter Xu 					ret = PTR_ERR(new);
48334eae4efaSPeter Xu 					break;
48344eae4efaSPeter Xu 				}
48354eae4efaSPeter Xu 				copy_user_huge_page(new, ptepage, addr, vma,
48364eae4efaSPeter Xu 						    npages);
48374eae4efaSPeter Xu 				put_page(ptepage);
48384eae4efaSPeter Xu 
48394eae4efaSPeter Xu 				/* Install the new huge page if src pte stable */
48404eae4efaSPeter Xu 				dst_ptl = huge_pte_lock(h, dst, dst_pte);
48414eae4efaSPeter Xu 				src_ptl = huge_pte_lockptr(h, src, src_pte);
48424eae4efaSPeter Xu 				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
48434eae4efaSPeter Xu 				entry = huge_ptep_get(src_pte);
48444eae4efaSPeter Xu 				if (!pte_same(src_pte_old, entry)) {
4845846be085SMike Kravetz 					restore_reserve_on_error(h, vma, addr,
4846846be085SMike Kravetz 								new);
48474eae4efaSPeter Xu 					put_page(new);
48484eae4efaSPeter Xu 					/* dst_entry won't change as in child */
48494eae4efaSPeter Xu 					goto again;
48504eae4efaSPeter Xu 				}
48514eae4efaSPeter Xu 				hugetlb_install_page(vma, dst_pte, addr, new);
48524eae4efaSPeter Xu 				spin_unlock(src_ptl);
48534eae4efaSPeter Xu 				spin_unlock(dst_ptl);
48544eae4efaSPeter Xu 				continue;
48554eae4efaSPeter Xu 			}
48564eae4efaSPeter Xu 
485734ee645eSJoerg Roedel 			if (cow) {
48580f10851eSJérôme Glisse 				/*
48590f10851eSJérôme Glisse 				 * No need to notify as we are downgrading page
48600f10851eSJérôme Glisse 				 * table protection not changing it to point
48610f10851eSJérôme Glisse 				 * to a new page.
48620f10851eSJérôme Glisse 				 *
4863ad56b738SMike Rapoport 				 * See Documentation/vm/mmu_notifier.rst
48640f10851eSJérôme Glisse 				 */
48657f2e9525SGerald Schaefer 				huge_ptep_set_wrprotect(src, addr, src_pte);
486684894e1cSPeter Xu 				entry = huge_pte_wrprotect(entry);
486734ee645eSJoerg Roedel 			}
48684eae4efaSPeter Xu 
486963551ae0SDavid Gibson 			set_huge_pte_at(dst, addr, dst_pte, entry);
48704eae4efaSPeter Xu 			hugetlb_count_add(npages, dst);
48711c59827dSHugh Dickins 		}
4872cb900f41SKirill A. Shutemov 		spin_unlock(src_ptl);
4873cb900f41SKirill A. Shutemov 		spin_unlock(dst_ptl);
487463551ae0SDavid Gibson 	}
487563551ae0SDavid Gibson 
4876623a1ddfSDavid Hildenbrand 	if (cow) {
4877623a1ddfSDavid Hildenbrand 		raw_write_seqcount_end(&src->write_protect_seq);
4878ac46d4f3SJérôme Glisse 		mmu_notifier_invalidate_range_end(&range);
4879623a1ddfSDavid Hildenbrand 	} else {
4880c0d0381aSMike Kravetz 		i_mmap_unlock_read(mapping);
4881623a1ddfSDavid Hildenbrand 	}
4882e8569dd2SAndreas Sandberg 
4883e8569dd2SAndreas Sandberg 	return ret;
488463551ae0SDavid Gibson }
488563551ae0SDavid Gibson 
4886550a7d60SMina Almasry static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
4887db110a99SAneesh Kumar K.V 			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
4888550a7d60SMina Almasry {
4889550a7d60SMina Almasry 	struct hstate *h = hstate_vma(vma);
4890550a7d60SMina Almasry 	struct mm_struct *mm = vma->vm_mm;
4891550a7d60SMina Almasry 	spinlock_t *src_ptl, *dst_ptl;
4892db110a99SAneesh Kumar K.V 	pte_t pte;
4893550a7d60SMina Almasry 
4894550a7d60SMina Almasry 	dst_ptl = huge_pte_lock(h, mm, dst_pte);
4895550a7d60SMina Almasry 	src_ptl = huge_pte_lockptr(h, mm, src_pte);
4896550a7d60SMina Almasry 
4897550a7d60SMina Almasry 	/*
4898550a7d60SMina Almasry 	 * We don't have to worry about the ordering of src and dst ptlocks
4899550a7d60SMina Almasry 	 * because exclusive mmap_sem (or the i_mmap_lock) prevents deadlock.
4900550a7d60SMina Almasry 	 */
4901550a7d60SMina Almasry 	if (src_ptl != dst_ptl)
4902550a7d60SMina Almasry 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4903550a7d60SMina Almasry 
4904550a7d60SMina Almasry 	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
4905550a7d60SMina Almasry 	set_huge_pte_at(mm, new_addr, dst_pte, pte);
4906550a7d60SMina Almasry 
4907550a7d60SMina Almasry 	if (src_ptl != dst_ptl)
4908550a7d60SMina Almasry 		spin_unlock(src_ptl);
4909550a7d60SMina Almasry 	spin_unlock(dst_ptl);
4910550a7d60SMina Almasry }
4911550a7d60SMina Almasry 
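/*
 * Relocate the huge PTEs of @vma for mremap().  Shared PMDs are unshared
 * instead of moved, in which case the TLB is flushed for the maximum
 * possible range rather than just the moved span.
 */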
4912550a7d60SMina Almasry int move_hugetlb_page_tables(struct vm_area_struct *vma,
4913550a7d60SMina Almasry 			     struct vm_area_struct *new_vma,
4914550a7d60SMina Almasry 			     unsigned long old_addr, unsigned long new_addr,
4915550a7d60SMina Almasry 			     unsigned long len)
4916550a7d60SMina Almasry {
4917550a7d60SMina Almasry 	struct hstate *h = hstate_vma(vma);
4918550a7d60SMina Almasry 	struct address_space *mapping = vma->vm_file->f_mapping;
4919550a7d60SMina Almasry 	unsigned long sz = huge_page_size(h);
4920550a7d60SMina Almasry 	struct mm_struct *mm = vma->vm_mm;
4921550a7d60SMina Almasry 	unsigned long old_end = old_addr + len;
4922550a7d60SMina Almasry 	unsigned long old_addr_copy;
4923550a7d60SMina Almasry 	pte_t *src_pte, *dst_pte;
4924550a7d60SMina Almasry 	struct mmu_notifier_range range;
49253d0b95cdSBaolin Wang 	bool shared_pmd = false;
4926550a7d60SMina Almasry 
4927550a7d60SMina Almasry 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, old_addr,
4928550a7d60SMina Almasry 				old_end);
4929550a7d60SMina Almasry 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
49303d0b95cdSBaolin Wang 	/*
49313d0b95cdSBaolin Wang 	 * In case of shared PMDs, we should cover the maximum possible
49323d0b95cdSBaolin Wang 	 * range.
49333d0b95cdSBaolin Wang 	 */
49343d0b95cdSBaolin Wang 	flush_cache_range(vma, range.start, range.end);
49353d0b95cdSBaolin Wang 
4936550a7d60SMina Almasry 	mmu_notifier_invalidate_range_start(&range);
4937550a7d60SMina Almasry 	/* Prevent race with file truncation */
4938550a7d60SMina Almasry 	i_mmap_lock_write(mapping);
4939550a7d60SMina Almasry 	for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
4940550a7d60SMina Almasry 		src_pte = huge_pte_offset(mm, old_addr, sz);
4941550a7d60SMina Almasry 		if (!src_pte)
4942550a7d60SMina Almasry 			continue;
4943550a7d60SMina Almasry 		if (huge_pte_none(huge_ptep_get(src_pte)))
4944550a7d60SMina Almasry 			continue;
4945550a7d60SMina Almasry 
4946550a7d60SMina Almasry 		/* old_addr arg to huge_pmd_unshare() is a pointer and so the
4947550a7d60SMina Almasry 		 * arg may be modified. Pass a copy instead to preserve the
4948550a7d60SMina Almasry 		 * value in old_addr.
4949550a7d60SMina Almasry 		 */
4950550a7d60SMina Almasry 		old_addr_copy = old_addr;
4951550a7d60SMina Almasry 
49523d0b95cdSBaolin Wang 		if (huge_pmd_unshare(mm, vma, &old_addr_copy, src_pte)) {
49533d0b95cdSBaolin Wang 			shared_pmd = true;
4954550a7d60SMina Almasry 			continue;
49553d0b95cdSBaolin Wang 		}
4956550a7d60SMina Almasry 
4957550a7d60SMina Almasry 		dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
4958550a7d60SMina Almasry 		if (!dst_pte)
4959550a7d60SMina Almasry 			break;
4960550a7d60SMina Almasry 
4961db110a99SAneesh Kumar K.V 		move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
4962550a7d60SMina Almasry 	}
49633d0b95cdSBaolin Wang 
49643d0b95cdSBaolin Wang 	if (shared_pmd)
49653d0b95cdSBaolin Wang 		flush_tlb_range(vma, range.start, range.end);
49663d0b95cdSBaolin Wang 	else
4967550a7d60SMina Almasry 		flush_tlb_range(vma, old_end - len, old_end);
4968550a7d60SMina Almasry 	mmu_notifier_invalidate_range_end(&range);
496913e4ad2cSNadav Amit 	i_mmap_unlock_write(mapping);
4970550a7d60SMina Almasry 
4971550a7d60SMina Almasry 	return len + old_addr - old_end;
4972550a7d60SMina Almasry }
4973550a7d60SMina Almasry 
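/*
 * Tear down the huge PTEs in [start, end).  If @ref_page is supplied only
 * that specific page is unmapped and the walk stops once it is found.
 * Unsharing a PMD forces a TLB flush before the function returns.
 */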
497473c54763SPeter Xu static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
497524669e58SAneesh Kumar K.V 				   unsigned long start, unsigned long end,
497624669e58SAneesh Kumar K.V 				   struct page *ref_page)
497763551ae0SDavid Gibson {
497863551ae0SDavid Gibson 	struct mm_struct *mm = vma->vm_mm;
497963551ae0SDavid Gibson 	unsigned long address;
4980c7546f8fSDavid Gibson 	pte_t *ptep;
498163551ae0SDavid Gibson 	pte_t pte;
4982cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
498363551ae0SDavid Gibson 	struct page *page;
4984a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
4985a5516438SAndi Kleen 	unsigned long sz = huge_page_size(h);
4986ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
4987a4a118f2SNadav Amit 	bool force_flush = false;
4988a5516438SAndi Kleen 
498963551ae0SDavid Gibson 	WARN_ON(!is_vm_hugetlb_page(vma));
4990a5516438SAndi Kleen 	BUG_ON(start & ~huge_page_mask(h));
4991a5516438SAndi Kleen 	BUG_ON(end & ~huge_page_mask(h));
499263551ae0SDavid Gibson 
499307e32661SAneesh Kumar K.V 	/*
499407e32661SAneesh Kumar K.V 	 * This is a hugetlb vma, all the pte entries should point
499507e32661SAneesh Kumar K.V 	 * to huge page.
499607e32661SAneesh Kumar K.V 	 */
4997ed6a7935SPeter Zijlstra 	tlb_change_page_size(tlb, sz);
499824669e58SAneesh Kumar K.V 	tlb_start_vma(tlb, vma);
4999dff11abeSMike Kravetz 
5000dff11abeSMike Kravetz 	/*
5001dff11abeSMike Kravetz 	 * If sharing possible, alert mmu notifiers of worst case.
5002dff11abeSMike Kravetz 	 */
50036f4f13e8SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
50046f4f13e8SJérôme Glisse 				end);
5005ac46d4f3SJérôme Glisse 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5006ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
5007569f48b8SHillf Danton 	address = start;
5008569f48b8SHillf Danton 	for (; address < end; address += sz) {
50097868a208SPunit Agrawal 		ptep = huge_pte_offset(mm, address, sz);
5010c7546f8fSDavid Gibson 		if (!ptep)
5011c7546f8fSDavid Gibson 			continue;
5012c7546f8fSDavid Gibson 
5013cb900f41SKirill A. Shutemov 		ptl = huge_pte_lock(h, mm, ptep);
501434ae204fSMike Kravetz 		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
501531d49da5SAneesh Kumar K.V 			spin_unlock(ptl);
5016a4a118f2SNadav Amit 			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
5017a4a118f2SNadav Amit 			force_flush = true;
501831d49da5SAneesh Kumar K.V 			continue;
501931d49da5SAneesh Kumar K.V 		}
502039dde65cSChen, Kenneth W 
50216629326bSHillf Danton 		pte = huge_ptep_get(ptep);
502231d49da5SAneesh Kumar K.V 		if (huge_pte_none(pte)) {
502331d49da5SAneesh Kumar K.V 			spin_unlock(ptl);
502431d49da5SAneesh Kumar K.V 			continue;
502531d49da5SAneesh Kumar K.V 		}
50266629326bSHillf Danton 
50276629326bSHillf Danton 		/*
50289fbc1f63SNaoya Horiguchi 		 * Migrating hugepage or HWPoisoned hugepage is already
50299fbc1f63SNaoya Horiguchi 		 * unmapped and its refcount is dropped, so just clear pte here.
50306629326bSHillf Danton 		 */
50319fbc1f63SNaoya Horiguchi 		if (unlikely(!pte_present(pte))) {
50329386fac3SPunit Agrawal 			huge_pte_clear(mm, address, ptep, sz);
503331d49da5SAneesh Kumar K.V 			spin_unlock(ptl);
503431d49da5SAneesh Kumar K.V 			continue;
50358c4894c6SNaoya Horiguchi 		}
50366629326bSHillf Danton 
50376629326bSHillf Danton 		page = pte_page(pte);
503804f2cbe3SMel Gorman 		/*
503904f2cbe3SMel Gorman 		 * If a reference page is supplied, it is because a specific
504004f2cbe3SMel Gorman 		 * page is being unmapped, not a range. Ensure the page we
504104f2cbe3SMel Gorman 		 * are about to unmap is the actual page of interest.
504204f2cbe3SMel Gorman 		 */
504304f2cbe3SMel Gorman 		if (ref_page) {
504431d49da5SAneesh Kumar K.V 			if (page != ref_page) {
504531d49da5SAneesh Kumar K.V 				spin_unlock(ptl);
504631d49da5SAneesh Kumar K.V 				continue;
504731d49da5SAneesh Kumar K.V 			}
504804f2cbe3SMel Gorman 			/*
504904f2cbe3SMel Gorman 			 * Mark the VMA as having unmapped its page so that
505004f2cbe3SMel Gorman 			 * future faults in this VMA will fail rather than
505104f2cbe3SMel Gorman 			 * looking like data was lost
505204f2cbe3SMel Gorman 			 */
505304f2cbe3SMel Gorman 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
505404f2cbe3SMel Gorman 		}
505504f2cbe3SMel Gorman 
5056c7546f8fSDavid Gibson 		pte = huge_ptep_get_and_clear(mm, address, ptep);
5057b528e4b6SAneesh Kumar K.V 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5058106c992aSGerald Schaefer 		if (huge_pte_dirty(pte))
50596649a386SKen Chen 			set_page_dirty(page);
50609e81130bSHillf Danton 
50615d317b2bSNaoya Horiguchi 		hugetlb_count_sub(pages_per_huge_page(h), mm);
5062cea86fe2SHugh Dickins 		page_remove_rmap(page, vma, true);
506331d49da5SAneesh Kumar K.V 
5064cb900f41SKirill A. Shutemov 		spin_unlock(ptl);
5065e77b0852SAneesh Kumar K.V 		tlb_remove_page_size(tlb, page, huge_page_size(h));
506624669e58SAneesh Kumar K.V 		/*
506731d49da5SAneesh Kumar K.V 		 * Bail out after unmapping reference page if supplied
506824669e58SAneesh Kumar K.V 		 */
506931d49da5SAneesh Kumar K.V 		if (ref_page)
507031d49da5SAneesh Kumar K.V 			break;
5071fe1668aeSChen, Kenneth W 	}
5072ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
507324669e58SAneesh Kumar K.V 	tlb_end_vma(tlb, vma);
5074a4a118f2SNadav Amit 
5075a4a118f2SNadav Amit 	/*
5076a4a118f2SNadav Amit 	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5077a4a118f2SNadav Amit 	 * could defer the flush until now, since by holding i_mmap_rwsem we
5078a4a118f2SNadav Amit 	 * guaranteed that the last reference would not be dropped. But we must
5079a4a118f2SNadav Amit 	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
5080a4a118f2SNadav Amit 	 * dropped and the last reference to the shared PMDs page might be
5081a4a118f2SNadav Amit 	 * dropped as well.
5082a4a118f2SNadav Amit 	 *
5083a4a118f2SNadav Amit 	 * In theory we could defer the freeing of the PMD pages as well, but
5084a4a118f2SNadav Amit 	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5085a4a118f2SNadav Amit 	 * detect sharing, so we cannot defer the release of the page either.
5086a4a118f2SNadav Amit 	 * Instead, do flush now.
5087a4a118f2SNadav Amit 	 */
5088a4a118f2SNadav Amit 	if (force_flush)
5089a4a118f2SNadav Amit 		tlb_flush_mmu_tlbonly(tlb);
50901da177e4SLinus Torvalds }
509163551ae0SDavid Gibson 
5092d833352aSMel Gorman void __unmap_hugepage_range_final(struct mmu_gather *tlb,
5093d833352aSMel Gorman 			  struct vm_area_struct *vma, unsigned long start,
5094d833352aSMel Gorman 			  unsigned long end, struct page *ref_page)
5095d833352aSMel Gorman {
5096d833352aSMel Gorman 	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
5097d833352aSMel Gorman 
5098d833352aSMel Gorman 	/*
5099d833352aSMel Gorman 	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
5100d833352aSMel Gorman 	 * test will fail on a vma being torn down, and not grab a page table
5101d833352aSMel Gorman 	 * on its way out.  We're lucky that the flag has such an appropriate
5102d833352aSMel Gorman 	 * name, and can in fact be safely cleared here. We could clear it
5103d833352aSMel Gorman 	 * before the __unmap_hugepage_range above, but all that's necessary
5104c8c06efaSDavidlohr Bueso 	 * is to clear it before releasing the i_mmap_rwsem. This works
5105d833352aSMel Gorman 	 * because in the context this is called, the VMA is about to be
5106c8c06efaSDavidlohr Bueso 	 * destroyed and the i_mmap_rwsem is held.
5107d833352aSMel Gorman 	 */
5108d833352aSMel Gorman 	vma->vm_flags &= ~VM_MAYSHARE;
5109d833352aSMel Gorman }
5110d833352aSMel Gorman 
5111502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
511204f2cbe3SMel Gorman 			  unsigned long end, struct page *ref_page)
5113502717f4SChen, Kenneth W {
511424669e58SAneesh Kumar K.V 	struct mmu_gather tlb;
5115dff11abeSMike Kravetz 
5116a72afd87SWill Deacon 	tlb_gather_mmu(&tlb, vma->vm_mm);
511724669e58SAneesh Kumar K.V 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
5118ae8eba8bSWill Deacon 	tlb_finish_mmu(&tlb);
5119502717f4SChen, Kenneth W }
5120502717f4SChen, Kenneth W 
512104f2cbe3SMel Gorman /*
512204f2cbe3SMel Gorman  * This is called when the original mapper is failing to COW a MAP_PRIVATE
5123578b7725SZhiyuan Dai  * mapping it owns the reserve page for. The intention is to unmap the page
512404f2cbe3SMel Gorman  * from other VMAs and let the children be SIGKILLed if they are faulting the
512504f2cbe3SMel Gorman  * same region.
512604f2cbe3SMel Gorman  */
51272f4612afSDavidlohr Bueso static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
51282a4b3dedSHarvey Harrison 			      struct page *page, unsigned long address)
512904f2cbe3SMel Gorman {
51307526674dSAdam Litke 	struct hstate *h = hstate_vma(vma);
513104f2cbe3SMel Gorman 	struct vm_area_struct *iter_vma;
513204f2cbe3SMel Gorman 	struct address_space *mapping;
513304f2cbe3SMel Gorman 	pgoff_t pgoff;
513404f2cbe3SMel Gorman 
513504f2cbe3SMel Gorman 	/*
513604f2cbe3SMel Gorman 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
513704f2cbe3SMel Gorman 	 * from page cache lookup which is in HPAGE_SIZE units.
513804f2cbe3SMel Gorman 	 */
51397526674dSAdam Litke 	address = address & huge_page_mask(h);
514036e4f20aSMichal Hocko 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
514136e4f20aSMichal Hocko 			vma->vm_pgoff;
514293c76a3dSAl Viro 	mapping = vma->vm_file->f_mapping;
514304f2cbe3SMel Gorman 
51444eb2b1dcSMel Gorman 	/*
51454eb2b1dcSMel Gorman 	 * Take the mapping lock for the duration of the table walk. As
51464eb2b1dcSMel Gorman 	 * this mapping should be shared between all the VMAs,
51474eb2b1dcSMel Gorman 	 * __unmap_hugepage_range() is called as the lock is already held
51484eb2b1dcSMel Gorman 	 */
514983cde9e8SDavidlohr Bueso 	i_mmap_lock_write(mapping);
51506b2dbba8SMichel Lespinasse 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
515104f2cbe3SMel Gorman 		/* Do not unmap the current VMA */
515204f2cbe3SMel Gorman 		if (iter_vma == vma)
515304f2cbe3SMel Gorman 			continue;
515404f2cbe3SMel Gorman 
515504f2cbe3SMel Gorman 		/*
51562f84a899SMel Gorman 		 * Shared VMAs have their own reserves and do not affect
51572f84a899SMel Gorman 		 * MAP_PRIVATE accounting but it is possible that a shared
51582f84a899SMel Gorman 		 * VMA is using the same page so check and skip such VMAs.
51592f84a899SMel Gorman 		 */
51602f84a899SMel Gorman 		if (iter_vma->vm_flags & VM_MAYSHARE)
51612f84a899SMel Gorman 			continue;
51622f84a899SMel Gorman 
51632f84a899SMel Gorman 		/*
516404f2cbe3SMel Gorman 		 * Unmap the page from other VMAs without their own reserves.
516504f2cbe3SMel Gorman 		 * They get marked to be SIGKILLed if they fault in these
516604f2cbe3SMel Gorman 		 * areas. This is because a future no-page fault on this VMA
516704f2cbe3SMel Gorman 		 * could insert a zeroed page instead of the data existing
516804f2cbe3SMel Gorman 		 * from the time of fork. This would look like data corruption
516904f2cbe3SMel Gorman 		 */
517004f2cbe3SMel Gorman 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
517124669e58SAneesh Kumar K.V 			unmap_hugepage_range(iter_vma, address,
517224669e58SAneesh Kumar K.V 					     address + huge_page_size(h), page);
517304f2cbe3SMel Gorman 	}
517483cde9e8SDavidlohr Bueso 	i_mmap_unlock_write(mapping);
517504f2cbe3SMel Gorman }
517604f2cbe3SMel Gorman 
51770fe6e20bSNaoya Horiguchi /*
5178c89357e2SDavid Hildenbrand  * hugetlb_wp() should be called with page lock of the original hugepage held.
5179aa6d2e8cSBaolin Wang  * Called with hugetlb_fault_mutex_table held and pte_page locked so we
5180ef009b25SMichal Hocko  * cannot race with other handlers or page migration.
5181ef009b25SMichal Hocko  * Keep the pte_same checks anyway to make transition from the mutex easier.
51820fe6e20bSNaoya Horiguchi  */
5183c89357e2SDavid Hildenbrand static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
5184c89357e2SDavid Hildenbrand 		       unsigned long address, pte_t *ptep, unsigned int flags,
5185cb900f41SKirill A. Shutemov 		       struct page *pagecache_page, spinlock_t *ptl)
51861e8f889bSDavid Gibson {
5187c89357e2SDavid Hildenbrand 	const bool unshare = flags & FAULT_FLAG_UNSHARE;
51883999f52eSAneesh Kumar K.V 	pte_t pte;
5189a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
51901e8f889bSDavid Gibson 	struct page *old_page, *new_page;
51912b740303SSouptick Joarder 	int outside_reserve = 0;
51922b740303SSouptick Joarder 	vm_fault_t ret = 0;
5193974e6d66SHuang Ying 	unsigned long haddr = address & huge_page_mask(h);
5194ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
51951e8f889bSDavid Gibson 
5196c89357e2SDavid Hildenbrand 	VM_BUG_ON(unshare && (flags & FOLL_WRITE));
5197c89357e2SDavid Hildenbrand 	VM_BUG_ON(!unshare && !(flags & FOLL_WRITE));
5198c89357e2SDavid Hildenbrand 
51993999f52eSAneesh Kumar K.V 	pte = huge_ptep_get(ptep);
52001e8f889bSDavid Gibson 	old_page = pte_page(pte);
52011e8f889bSDavid Gibson 
520204f2cbe3SMel Gorman retry_avoidcopy:
5203c89357e2SDavid Hildenbrand 	/*
5204c89357e2SDavid Hildenbrand 	 * If no-one else is actually using this page, we're the exclusive
5205c89357e2SDavid Hildenbrand 	 * owner and can reuse this page.
5206c89357e2SDavid Hildenbrand 	 */
520737a2140dSJoonsoo Kim 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
5208c89357e2SDavid Hildenbrand 		if (!PageAnonExclusive(old_page))
52095a49973dSHugh Dickins 			page_move_anon_rmap(old_page, vma);
5210c89357e2SDavid Hildenbrand 		if (likely(!unshare))
52115b7a1d40SHuang Ying 			set_huge_ptep_writable(vma, haddr, ptep);
521283c54070SNick Piggin 		return 0;
52131e8f889bSDavid Gibson 	}
52146c287605SDavid Hildenbrand 	VM_BUG_ON_PAGE(PageAnon(old_page) && PageAnonExclusive(old_page),
52156c287605SDavid Hildenbrand 		       old_page);
52161e8f889bSDavid Gibson 
521704f2cbe3SMel Gorman 	/*
521804f2cbe3SMel Gorman 	 * If the process that created a MAP_PRIVATE mapping is about to
521904f2cbe3SMel Gorman 	 * perform a COW due to a shared page count, attempt to satisfy
522004f2cbe3SMel Gorman 	 * the allocation without using the existing reserves. The pagecache
522104f2cbe3SMel Gorman 	 * page is used to determine if the reserve at this address was
522204f2cbe3SMel Gorman 	 * consumed or not. If reserves were used, a partial faulted mapping
522304f2cbe3SMel Gorman 	 * at the time of fork() could consume its reserves on COW instead
522404f2cbe3SMel Gorman 	 * of the full address range.
522504f2cbe3SMel Gorman 	 */
52265944d011SJoonsoo Kim 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
522704f2cbe3SMel Gorman 			old_page != pagecache_page)
522804f2cbe3SMel Gorman 		outside_reserve = 1;
522904f2cbe3SMel Gorman 
523009cbfeafSKirill A. Shutemov 	get_page(old_page);
5231b76c8cfbSLarry Woodman 
5232ad4404a2SDavidlohr Bueso 	/*
5233ad4404a2SDavidlohr Bueso 	 * Drop page table lock as buddy allocator may be called. It will
5234ad4404a2SDavidlohr Bueso 	 * be acquired again before returning to the caller, as expected.
5235ad4404a2SDavidlohr Bueso 	 */
5236cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
52375b7a1d40SHuang Ying 	new_page = alloc_huge_page(vma, haddr, outside_reserve);
52381e8f889bSDavid Gibson 
52392fc39cecSAdam Litke 	if (IS_ERR(new_page)) {
524004f2cbe3SMel Gorman 		/*
524104f2cbe3SMel Gorman 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
524204f2cbe3SMel Gorman 		 * it is due to references held by a child and an insufficient
524304f2cbe3SMel Gorman 		 * huge page pool. To guarantee the original mapper's
524404f2cbe3SMel Gorman 		 * reliability, unmap the page from child processes. The child
524504f2cbe3SMel Gorman 		 * may get SIGKILLed if it later faults.
524604f2cbe3SMel Gorman 		 */
524704f2cbe3SMel Gorman 		if (outside_reserve) {
5248e7dd91c4SMike Kravetz 			struct address_space *mapping = vma->vm_file->f_mapping;
5249e7dd91c4SMike Kravetz 			pgoff_t idx;
5250e7dd91c4SMike Kravetz 			u32 hash;
5251e7dd91c4SMike Kravetz 
525209cbfeafSKirill A. Shutemov 			put_page(old_page);
525304f2cbe3SMel Gorman 			BUG_ON(huge_pte_none(pte));
5254e7dd91c4SMike Kravetz 			/*
5255e7dd91c4SMike Kravetz 			 * Drop hugetlb_fault_mutex and i_mmap_rwsem before
5256e7dd91c4SMike Kravetz 			 * unmapping.  unmapping needs to hold i_mmap_rwsem
5257e7dd91c4SMike Kravetz 			 * in write mode.  Dropping i_mmap_rwsem in read mode
5258e7dd91c4SMike Kravetz 			 * here is OK as COW mappings do not interact with
5259e7dd91c4SMike Kravetz 			 * PMD sharing.
5260e7dd91c4SMike Kravetz 			 *
5261e7dd91c4SMike Kravetz 			 * Reacquire both after unmap operation.
5262e7dd91c4SMike Kravetz 			 */
5263e7dd91c4SMike Kravetz 			idx = vma_hugecache_offset(h, vma, haddr);
5264e7dd91c4SMike Kravetz 			hash = hugetlb_fault_mutex_hash(mapping, idx);
5265e7dd91c4SMike Kravetz 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5266e7dd91c4SMike Kravetz 			i_mmap_unlock_read(mapping);
5267e7dd91c4SMike Kravetz 
52685b7a1d40SHuang Ying 			unmap_ref_private(mm, vma, old_page, haddr);
5269e7dd91c4SMike Kravetz 
5270e7dd91c4SMike Kravetz 			i_mmap_lock_read(mapping);
5271e7dd91c4SMike Kravetz 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
5272cb900f41SKirill A. Shutemov 			spin_lock(ptl);
52735b7a1d40SHuang Ying 			ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5274a9af0c5dSNaoya Horiguchi 			if (likely(ptep &&
5275a9af0c5dSNaoya Horiguchi 				   pte_same(huge_ptep_get(ptep), pte)))
527604f2cbe3SMel Gorman 				goto retry_avoidcopy;
5277a734bcc8SHillf Danton 			/*
5278cb900f41SKirill A. Shutemov 			 * A race occurred while re-acquiring the page
5279cb900f41SKirill A. Shutemov 			 * table lock, and our job is done.
5280a734bcc8SHillf Danton 			 */
5281a734bcc8SHillf Danton 			return 0;
528204f2cbe3SMel Gorman 		}
528304f2cbe3SMel Gorman 
52842b740303SSouptick Joarder 		ret = vmf_error(PTR_ERR(new_page));
5285ad4404a2SDavidlohr Bueso 		goto out_release_old;
52861e8f889bSDavid Gibson 	}
52871e8f889bSDavid Gibson 
52880fe6e20bSNaoya Horiguchi 	/*
52890fe6e20bSNaoya Horiguchi 	 * When the original hugepage is a shared one, it does not have
52900fe6e20bSNaoya Horiguchi 	 * an anon_vma prepared.
52910fe6e20bSNaoya Horiguchi 	 */
529244e2aa93SDean Nelson 	if (unlikely(anon_vma_prepare(vma))) {
5293ad4404a2SDavidlohr Bueso 		ret = VM_FAULT_OOM;
5294ad4404a2SDavidlohr Bueso 		goto out_release_all;
529544e2aa93SDean Nelson 	}
52960fe6e20bSNaoya Horiguchi 
5297974e6d66SHuang Ying 	copy_user_huge_page(new_page, old_page, address, vma,
529847ad8475SAndrea Arcangeli 			    pages_per_huge_page(h));
52990ed361deSNick Piggin 	__SetPageUptodate(new_page);
53001e8f889bSDavid Gibson 
53017269f999SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
53026f4f13e8SJérôme Glisse 				haddr + huge_page_size(h));
5303ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
5304ad4404a2SDavidlohr Bueso 
5305b76c8cfbSLarry Woodman 	/*
5306cb900f41SKirill A. Shutemov 	 * Retake the page table lock to check for racing updates
5307b76c8cfbSLarry Woodman 	 * before the page tables are altered
5308b76c8cfbSLarry Woodman 	 */
5309cb900f41SKirill A. Shutemov 	spin_lock(ptl);
53105b7a1d40SHuang Ying 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5311a9af0c5dSNaoya Horiguchi 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
5312d6995da3SMike Kravetz 		ClearHPageRestoreReserve(new_page);
531307443a85SJoonsoo Kim 
5314c89357e2SDavid Hildenbrand 		/* Break COW or unshare */
53155b7a1d40SHuang Ying 		huge_ptep_clear_flush(vma, haddr, ptep);
5316ac46d4f3SJérôme Glisse 		mmu_notifier_invalidate_range(mm, range.start, range.end);
5317cea86fe2SHugh Dickins 		page_remove_rmap(old_page, vma, true);
53185b7a1d40SHuang Ying 		hugepage_add_new_anon_rmap(new_page, vma, haddr);
53191eba86c0SPasha Tatashin 		set_huge_pte_at(mm, haddr, ptep,
5320c89357e2SDavid Hildenbrand 				make_huge_pte(vma, new_page, !unshare));
53218f251a3dSMike Kravetz 		SetHPageMigratable(new_page);
53221e8f889bSDavid Gibson 		/* Make the old page be freed below */
53231e8f889bSDavid Gibson 		new_page = old_page;
53241e8f889bSDavid Gibson 	}
5325cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
5326ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
5327ad4404a2SDavidlohr Bueso out_release_all:
5328c89357e2SDavid Hildenbrand 	/*
5329c89357e2SDavid Hildenbrand 	 * No restore in case of successful pagetable update (Break COW or
5330c89357e2SDavid Hildenbrand 	 * unshare)
5331c89357e2SDavid Hildenbrand 	 */
5332c7b1850dSMike Kravetz 	if (new_page != old_page)
53335b7a1d40SHuang Ying 		restore_reserve_on_error(h, vma, haddr, new_page);
533409cbfeafSKirill A. Shutemov 	put_page(new_page);
5335ad4404a2SDavidlohr Bueso out_release_old:
533609cbfeafSKirill A. Shutemov 	put_page(old_page);
53378312034fSJoonsoo Kim 
5338ad4404a2SDavidlohr Bueso 	spin_lock(ptl); /* Caller expects lock to be held */
5339ad4404a2SDavidlohr Bueso 	return ret;
53401e8f889bSDavid Gibson }
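/*
 * Caller-side sketch (illustrative only, mirroring hugetlb_fault() and
 * hugetlb_no_page() below): the page table lock is held on entry and is
 * still held when hugetlb_wp() returns.
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	...
 *	ret = hugetlb_wp(mm, vma, address, ptep, flags, pagecache_page, ptl);
 *	...
 *	spin_unlock(ptl);
 */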
53411e8f889bSDavid Gibson 
534204f2cbe3SMel Gorman /* Return the pagecache page at a given address within a VMA */
5343a5516438SAndi Kleen static struct page *hugetlbfs_pagecache_page(struct hstate *h,
5344a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long address)
534504f2cbe3SMel Gorman {
534604f2cbe3SMel Gorman 	struct address_space *mapping;
5347e7c4b0bfSAndy Whitcroft 	pgoff_t idx;
534804f2cbe3SMel Gorman 
534904f2cbe3SMel Gorman 	mapping = vma->vm_file->f_mapping;
5350a5516438SAndi Kleen 	idx = vma_hugecache_offset(h, vma, address);
535104f2cbe3SMel Gorman 
535204f2cbe3SMel Gorman 	return find_lock_page(mapping, idx);
535304f2cbe3SMel Gorman }
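/*
 * Editorial note: find_lock_page() hands back the page locked and with an
 * elevated refcount, so callers such as hugetlb_fault() below pair this
 * lookup with unlock_page() + put_page().
 */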
535404f2cbe3SMel Gorman 
53553ae77f43SHugh Dickins /*
53563ae77f43SHugh Dickins  * Return whether there is a pagecache page to back a given address within the VMA.
53573ae77f43SHugh Dickins  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
53583ae77f43SHugh Dickins  */
53593ae77f43SHugh Dickins static bool hugetlbfs_pagecache_present(struct hstate *h,
53602a15efc9SHugh Dickins 			struct vm_area_struct *vma, unsigned long address)
53612a15efc9SHugh Dickins {
53622a15efc9SHugh Dickins 	struct address_space *mapping;
53632a15efc9SHugh Dickins 	pgoff_t idx;
53642a15efc9SHugh Dickins 	struct page *page;
53652a15efc9SHugh Dickins 
53662a15efc9SHugh Dickins 	mapping = vma->vm_file->f_mapping;
53672a15efc9SHugh Dickins 	idx = vma_hugecache_offset(h, vma, address);
53682a15efc9SHugh Dickins 
53692a15efc9SHugh Dickins 	page = find_get_page(mapping, idx);
53702a15efc9SHugh Dickins 	if (page)
53712a15efc9SHugh Dickins 		put_page(page);
53722a15efc9SHugh Dickins 	return page != NULL;
53732a15efc9SHugh Dickins }
53742a15efc9SHugh Dickins 
5375ab76ad54SMike Kravetz int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
5376ab76ad54SMike Kravetz 			   pgoff_t idx)
5377ab76ad54SMike Kravetz {
5378ab76ad54SMike Kravetz 	struct inode *inode = mapping->host;
5379ab76ad54SMike Kravetz 	struct hstate *h = hstate_inode(inode);
5380ab76ad54SMike Kravetz 	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
5381ab76ad54SMike Kravetz 
5382ab76ad54SMike Kravetz 	if (err)
5383ab76ad54SMike Kravetz 		return err;
5384d6995da3SMike Kravetz 	ClearHPageRestoreReserve(page);
5385ab76ad54SMike Kravetz 
538622146c3cSMike Kravetz 	/*
538722146c3cSMike Kravetz 	 * Set the page dirty so that it will not be removed from cache/file
538822146c3cSMike Kravetz 	 * by non-hugetlbfs specific code paths.
538922146c3cSMike Kravetz 	 */
539022146c3cSMike Kravetz 	set_page_dirty(page);
539122146c3cSMike Kravetz 
5392ab76ad54SMike Kravetz 	spin_lock(&inode->i_lock);
5393ab76ad54SMike Kravetz 	inode->i_blocks += blocks_per_huge_page(h);
5394ab76ad54SMike Kravetz 	spin_unlock(&inode->i_lock);
5395ab76ad54SMike Kravetz 	return 0;
5396ab76ad54SMike Kravetz }
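/*
 * Editorial note: callers are expected to hold the hugetlb fault mutex for
 * this (mapping, idx) pair so the insertion cannot race with
 * remove_inode_hugepages() (see the comment in hugetlb_mcopy_atomic_pte()
 * below).  On success the inode's i_blocks grows by blocks_per_huge_page(h).
 */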
5397ab76ad54SMike Kravetz 
53987677f7fdSAxel Rasmussen static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
53997677f7fdSAxel Rasmussen 						  struct address_space *mapping,
54007677f7fdSAxel Rasmussen 						  pgoff_t idx,
54017677f7fdSAxel Rasmussen 						  unsigned int flags,
54027677f7fdSAxel Rasmussen 						  unsigned long haddr,
5403824ddc60SNadav Amit 						  unsigned long addr,
54047677f7fdSAxel Rasmussen 						  unsigned long reason)
54057677f7fdSAxel Rasmussen {
54067677f7fdSAxel Rasmussen 	vm_fault_t ret;
54077677f7fdSAxel Rasmussen 	u32 hash;
54087677f7fdSAxel Rasmussen 	struct vm_fault vmf = {
54097677f7fdSAxel Rasmussen 		.vma = vma,
54107677f7fdSAxel Rasmussen 		.address = haddr,
5411824ddc60SNadav Amit 		.real_address = addr,
54127677f7fdSAxel Rasmussen 		.flags = flags,
54137677f7fdSAxel Rasmussen 
54147677f7fdSAxel Rasmussen 		/*
54157677f7fdSAxel Rasmussen 		 * Hard to debug if it ends up being
54167677f7fdSAxel Rasmussen 		 * used by a callee that assumes
54177677f7fdSAxel Rasmussen 		 * something about the other
54187677f7fdSAxel Rasmussen 		 * uninitialized fields... same as in
54197677f7fdSAxel Rasmussen 		 * memory.c
54207677f7fdSAxel Rasmussen 		 */
54217677f7fdSAxel Rasmussen 	};
54227677f7fdSAxel Rasmussen 
54237677f7fdSAxel Rasmussen 	/*
54247677f7fdSAxel Rasmussen 	 * hugetlb_fault_mutex and i_mmap_rwsem must be
54257677f7fdSAxel Rasmussen 	 * dropped before handling userfault.  Reacquire
54267677f7fdSAxel Rasmussen 	 * after handling fault to make calling code simpler.
54277677f7fdSAxel Rasmussen 	 */
54287677f7fdSAxel Rasmussen 	hash = hugetlb_fault_mutex_hash(mapping, idx);
54297677f7fdSAxel Rasmussen 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
54307677f7fdSAxel Rasmussen 	i_mmap_unlock_read(mapping);
54317677f7fdSAxel Rasmussen 	ret = handle_userfault(&vmf, reason);
54327677f7fdSAxel Rasmussen 	i_mmap_lock_read(mapping);
54337677f7fdSAxel Rasmussen 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
54347677f7fdSAxel Rasmussen 
54357677f7fdSAxel Rasmussen 	return ret;
54367677f7fdSAxel Rasmussen }
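/*
 * Editorial note: the mutex hash is recomputed from (mapping, idx) rather
 * than passed in; hugetlb_fault() below derives its hash from the same
 * pair, so the identical mutex is dropped and re-taken around
 * handle_userfault().
 */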
54377677f7fdSAxel Rasmussen 
54382b740303SSouptick Joarder static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
54392b740303SSouptick Joarder 			struct vm_area_struct *vma,
54408382d914SDavidlohr Bueso 			struct address_space *mapping, pgoff_t idx,
5441c64e912cSPeter Xu 			unsigned long address, pte_t *ptep,
5442c64e912cSPeter Xu 			pte_t old_pte, unsigned int flags)
5443ac9b9c66SHugh Dickins {
5444a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
54452b740303SSouptick Joarder 	vm_fault_t ret = VM_FAULT_SIGBUS;
5446409eb8c2SHillf Danton 	int anon_rmap = 0;
54474c887265SAdam Litke 	unsigned long size;
54484c887265SAdam Litke 	struct page *page;
54491e8f889bSDavid Gibson 	pte_t new_pte;
5450cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
5451285b8dcaSHuang Ying 	unsigned long haddr = address & huge_page_mask(h);
5452c7b1850dSMike Kravetz 	bool new_page, new_pagecache_page = false;
54534c887265SAdam Litke 
545404f2cbe3SMel Gorman 	/*
545504f2cbe3SMel Gorman 	 * Currently, we are forced to kill the process in the event the
545604f2cbe3SMel Gorman 	 * original mapper has unmapped pages from the child due to a failed
5457c89357e2SDavid Hildenbrand 	 * COW/unsharing. Warn that such a situation has occurred as it may not
5458c89357e2SDavid Hildenbrand 	 * be obvious.
545904f2cbe3SMel Gorman 	 */
546004f2cbe3SMel Gorman 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
5461910154d5SGeoffrey Thomas 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
546204f2cbe3SMel Gorman 			   current->pid);
546304f2cbe3SMel Gorman 		return ret;
546404f2cbe3SMel Gorman 	}
546504f2cbe3SMel Gorman 
54664c887265SAdam Litke 	/*
546787bf91d3SMike Kravetz 	 * We cannot race with truncation because we hold i_mmap_rwsem.
546887bf91d3SMike Kravetz 	 * i_size is modified when holding i_mmap_rwsem, so check here
546987bf91d3SMike Kravetz 	 * once for faults beyond end of file.
54704c887265SAdam Litke 	 */
5471a5516438SAndi Kleen 	size = i_size_read(mapping->host) >> huge_page_shift(h);
5472ebed4bfcSHugh Dickins 	if (idx >= size)
5473ebed4bfcSHugh Dickins 		goto out;
54741a1aad8aSMike Kravetz 
547587bf91d3SMike Kravetz retry:
5476c7b1850dSMike Kravetz 	new_page = false;
547787bf91d3SMike Kravetz 	page = find_lock_page(mapping, idx);
547887bf91d3SMike Kravetz 	if (!page) {
54797677f7fdSAxel Rasmussen 		/* Check for page in userfault range */
54801a1aad8aSMike Kravetz 		if (userfaultfd_missing(vma)) {
54817677f7fdSAxel Rasmussen 			ret = hugetlb_handle_userfault(vma, mapping, idx,
5482824ddc60SNadav Amit 						       flags, haddr, address,
54837677f7fdSAxel Rasmussen 						       VM_UFFD_MISSING);
54841a1aad8aSMike Kravetz 			goto out;
54851a1aad8aSMike Kravetz 		}
54861a1aad8aSMike Kravetz 
5487285b8dcaSHuang Ying 		page = alloc_huge_page(vma, haddr, 0);
54882fc39cecSAdam Litke 		if (IS_ERR(page)) {
54894643d67eSMike Kravetz 			/*
54904643d67eSMike Kravetz 			 * Returning an error will result in the faulting task
54914643d67eSMike Kravetz 			 * being sent SIGBUS.  The hugetlb fault mutex prevents
54924643d67eSMike Kravetz 			 * two tasks from racing to fault in the same page, which
54934643d67eSMike Kravetz 			 * could result in spurious allocation failures.
54944643d67eSMike Kravetz 			 * Page migration does not take the fault mutex, but
54954643d67eSMike Kravetz 			 * does a clear then write of pte's under page table
54964643d67eSMike Kravetz 			 * lock.  Page fault code could race with migration,
54974643d67eSMike Kravetz 			 * notice the clear pte and try to allocate a page
54984643d67eSMike Kravetz 			 * here.  Before returning error, get ptl and make
54994643d67eSMike Kravetz 			 * sure there really is no pte entry.
55004643d67eSMike Kravetz 			 */
55014643d67eSMike Kravetz 			ptl = huge_pte_lock(h, mm, ptep);
55024643d67eSMike Kravetz 			ret = 0;
5503d83e6c8aSMiaohe Lin 			if (huge_pte_none(huge_ptep_get(ptep)))
55042b740303SSouptick Joarder 				ret = vmf_error(PTR_ERR(page));
5505d83e6c8aSMiaohe Lin 			spin_unlock(ptl);
55066bda666aSChristoph Lameter 			goto out;
55076bda666aSChristoph Lameter 		}
550847ad8475SAndrea Arcangeli 		clear_huge_page(page, address, pages_per_huge_page(h));
55090ed361deSNick Piggin 		__SetPageUptodate(page);
5510cb6acd01SMike Kravetz 		new_page = true;
5511ac9b9c66SHugh Dickins 
5512f83a275dSMel Gorman 		if (vma->vm_flags & VM_MAYSHARE) {
5513ab76ad54SMike Kravetz 			int err = huge_add_to_page_cache(page, mapping, idx);
55146bda666aSChristoph Lameter 			if (err) {
55156bda666aSChristoph Lameter 				put_page(page);
55166bda666aSChristoph Lameter 				if (err == -EEXIST)
55176bda666aSChristoph Lameter 					goto retry;
55186bda666aSChristoph Lameter 				goto out;
55196bda666aSChristoph Lameter 			}
5520c7b1850dSMike Kravetz 			new_pagecache_page = true;
552123be7468SMel Gorman 		} else {
55226bda666aSChristoph Lameter 			lock_page(page);
55230fe6e20bSNaoya Horiguchi 			if (unlikely(anon_vma_prepare(vma))) {
55240fe6e20bSNaoya Horiguchi 				ret = VM_FAULT_OOM;
55250fe6e20bSNaoya Horiguchi 				goto backout_unlocked;
552623be7468SMel Gorman 			}
5527409eb8c2SHillf Danton 			anon_rmap = 1;
55280fe6e20bSNaoya Horiguchi 		}
55290fe6e20bSNaoya Horiguchi 	} else {
553057303d80SAndy Whitcroft 		/*
5531998b4382SNaoya Horiguchi 		 * If a memory error occurs between mmap() and fault, some processes
5532998b4382SNaoya Horiguchi 		 * may not have a hwpoisoned swap entry for the errored virtual address.
5533998b4382SNaoya Horiguchi 		 * So we need to block the hugepage fault with a PG_hwpoison bit check.
5534fd6a03edSNaoya Horiguchi 		 */
5535fd6a03edSNaoya Horiguchi 		if (unlikely(PageHWPoison(page))) {
55360eb98f15SMiaohe Lin 			ret = VM_FAULT_HWPOISON_LARGE |
5537972dc4deSAneesh Kumar K.V 				VM_FAULT_SET_HINDEX(hstate_index(h));
5538fd6a03edSNaoya Horiguchi 			goto backout_unlocked;
55396bda666aSChristoph Lameter 		}
55407677f7fdSAxel Rasmussen 
55417677f7fdSAxel Rasmussen 		/* Check for page in userfault range. */
55427677f7fdSAxel Rasmussen 		if (userfaultfd_minor(vma)) {
55437677f7fdSAxel Rasmussen 			unlock_page(page);
55447677f7fdSAxel Rasmussen 			put_page(page);
55457677f7fdSAxel Rasmussen 			ret = hugetlb_handle_userfault(vma, mapping, idx,
5546824ddc60SNadav Amit 						       flags, haddr, address,
55477677f7fdSAxel Rasmussen 						       VM_UFFD_MINOR);
55487677f7fdSAxel Rasmussen 			goto out;
55497677f7fdSAxel Rasmussen 		}
5550998b4382SNaoya Horiguchi 	}
55511e8f889bSDavid Gibson 
555257303d80SAndy Whitcroft 	/*
555357303d80SAndy Whitcroft 	 * If we are going to COW a private mapping later, we examine the
555457303d80SAndy Whitcroft 	 * pending reservations for this page now. This will ensure that
555557303d80SAndy Whitcroft 	 * any allocations necessary to record that reservation occur outside
555657303d80SAndy Whitcroft 	 * the spinlock.
555757303d80SAndy Whitcroft 	 */
55585e911373SMike Kravetz 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5559285b8dcaSHuang Ying 		if (vma_needs_reservation(h, vma, haddr) < 0) {
55602b26736cSAndy Whitcroft 			ret = VM_FAULT_OOM;
55612b26736cSAndy Whitcroft 			goto backout_unlocked;
55622b26736cSAndy Whitcroft 		}
55635e911373SMike Kravetz 		/* Just decrements count, does not deallocate */
5564285b8dcaSHuang Ying 		vma_end_reservation(h, vma, haddr);
55655e911373SMike Kravetz 	}
556657303d80SAndy Whitcroft 
55678bea8052SAneesh Kumar K.V 	ptl = huge_pte_lock(h, mm, ptep);
556883c54070SNick Piggin 	ret = 0;
5569c64e912cSPeter Xu 	/* If pte changed from under us, retry */
5570c64e912cSPeter Xu 	if (!pte_same(huge_ptep_get(ptep), old_pte))
55714c887265SAdam Litke 		goto backout;
55724c887265SAdam Litke 
557307443a85SJoonsoo Kim 	if (anon_rmap) {
5574d6995da3SMike Kravetz 		ClearHPageRestoreReserve(page);
5575285b8dcaSHuang Ying 		hugepage_add_new_anon_rmap(page, vma, haddr);
5576ac714904SChoi Gi-yong 	} else
5577fb3d824dSDavid Hildenbrand 		page_dup_file_rmap(page, true);
55781e8f889bSDavid Gibson 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
55791e8f889bSDavid Gibson 				&& (vma->vm_flags & VM_SHARED)));
5580c64e912cSPeter Xu 	/*
5581c64e912cSPeter Xu 	 * If this pte was previously wr-protected, keep it wr-protected even
5582c64e912cSPeter Xu 	 * if populated.
5583c64e912cSPeter Xu 	 */
5584c64e912cSPeter Xu 	if (unlikely(pte_marker_uffd_wp(old_pte)))
5585c64e912cSPeter Xu 		new_pte = huge_pte_wrprotect(huge_pte_mkuffd_wp(new_pte));
5586285b8dcaSHuang Ying 	set_huge_pte_at(mm, haddr, ptep, new_pte);
55871e8f889bSDavid Gibson 
55885d317b2bSNaoya Horiguchi 	hugetlb_count_add(pages_per_huge_page(h), mm);
5589788c7df4SHugh Dickins 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
55901e8f889bSDavid Gibson 		/* Optimization, do the COW without a second fault */
5591c89357e2SDavid Hildenbrand 		ret = hugetlb_wp(mm, vma, address, ptep, flags, page, ptl);
55921e8f889bSDavid Gibson 	}
55931e8f889bSDavid Gibson 
5594cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
5595cb6acd01SMike Kravetz 
5596cb6acd01SMike Kravetz 	/*
55978f251a3dSMike Kravetz 	 * Only set HPageMigratable in newly allocated pages.  Existing pages
55988f251a3dSMike Kravetz 	 * found in the pagecache may not have HPageMigratable set if they have
55998f251a3dSMike Kravetz 	 * been isolated for migration.
5600cb6acd01SMike Kravetz 	 */
5601cb6acd01SMike Kravetz 	if (new_page)
56028f251a3dSMike Kravetz 		SetHPageMigratable(page);
5603cb6acd01SMike Kravetz 
56044c887265SAdam Litke 	unlock_page(page);
56054c887265SAdam Litke out:
5606ac9b9c66SHugh Dickins 	return ret;
56074c887265SAdam Litke 
56084c887265SAdam Litke backout:
5609cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
56102b26736cSAndy Whitcroft backout_unlocked:
56114c887265SAdam Litke 	unlock_page(page);
5612c7b1850dSMike Kravetz 	/* restore reserve for newly allocated pages not in page cache */
5613c7b1850dSMike Kravetz 	if (new_page && !new_pagecache_page)
5614285b8dcaSHuang Ying 		restore_reserve_on_error(h, vma, haddr, page);
56154c887265SAdam Litke 	put_page(page);
56164c887265SAdam Litke 	goto out;
5617ac9b9c66SHugh Dickins }
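/*
 * Flow summary (editorial): hugetlb_no_page() either finds the page in the
 * page cache, hands the fault to userfaultfd (MISSING or MINOR), or
 * allocates and maps a fresh zeroed huge page; a write fault on a private
 * mapping then falls through to hugetlb_wp() while the page table lock is
 * still held.
 */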
5618ac9b9c66SHugh Dickins 
56198382d914SDavidlohr Bueso #ifdef CONFIG_SMP
5620188b04a7SWei Yang u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
56218382d914SDavidlohr Bueso {
56228382d914SDavidlohr Bueso 	unsigned long key[2];
56238382d914SDavidlohr Bueso 	u32 hash;
56248382d914SDavidlohr Bueso 
56258382d914SDavidlohr Bueso 	key[0] = (unsigned long) mapping;
56268382d914SDavidlohr Bueso 	key[1] = idx;
56278382d914SDavidlohr Bueso 
562855254636SMike Kravetz 	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
56298382d914SDavidlohr Bueso 
56308382d914SDavidlohr Bueso 	return hash & (num_fault_mutexes - 1);
56318382d914SDavidlohr Bueso }
56328382d914SDavidlohr Bueso #else
56338382d914SDavidlohr Bueso /*
56346c26d310SMiaohe Lin  * For uniprocessor systems we always use a single mutex, so just
56358382d914SDavidlohr Bueso  * return 0 and avoid the hashing overhead.
56368382d914SDavidlohr Bueso  */
5637188b04a7SWei Yang u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
56388382d914SDavidlohr Bueso {
56398382d914SDavidlohr Bueso 	return 0;
56408382d914SDavidlohr Bueso }
56418382d914SDavidlohr Bueso #endif
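/*
 * Usage sketch (as done by hugetlb_fault() below): serialize work on a
 * given (mapping, idx) pair through the shared mutex table:
 *
 *	hash = hugetlb_fault_mutex_hash(mapping, idx);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */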
56428382d914SDavidlohr Bueso 
56432b740303SSouptick Joarder vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
5644788c7df4SHugh Dickins 			unsigned long address, unsigned int flags)
564586e5216fSAdam Litke {
56468382d914SDavidlohr Bueso 	pte_t *ptep, entry;
5647cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
56482b740303SSouptick Joarder 	vm_fault_t ret;
56498382d914SDavidlohr Bueso 	u32 hash;
56508382d914SDavidlohr Bueso 	pgoff_t idx;
56510fe6e20bSNaoya Horiguchi 	struct page *page = NULL;
565257303d80SAndy Whitcroft 	struct page *pagecache_page = NULL;
5653a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
56548382d914SDavidlohr Bueso 	struct address_space *mapping;
56550f792cf9SNaoya Horiguchi 	int need_wait_lock = 0;
5656285b8dcaSHuang Ying 	unsigned long haddr = address & huge_page_mask(h);
565786e5216fSAdam Litke 
5658285b8dcaSHuang Ying 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5659fd6a03edSNaoya Horiguchi 	if (ptep) {
5660c0d0381aSMike Kravetz 		/*
5661c0d0381aSMike Kravetz 		 * Since we hold no locks, ptep could be stale.  That is
5662c0d0381aSMike Kravetz 		 * OK as we are only making decisions based on content and
5663c0d0381aSMike Kravetz 		 * not actually modifying content here.
5664c0d0381aSMike Kravetz 		 */
5665fd6a03edSNaoya Horiguchi 		entry = huge_ptep_get(ptep);
5666290408d4SNaoya Horiguchi 		if (unlikely(is_hugetlb_entry_migration(entry))) {
5667cb900f41SKirill A. Shutemov 			migration_entry_wait_huge(vma, mm, ptep);
5668290408d4SNaoya Horiguchi 			return 0;
5669290408d4SNaoya Horiguchi 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
5670aa50d3a7SAndi Kleen 			return VM_FAULT_HWPOISON_LARGE |
5671972dc4deSAneesh Kumar K.V 				VM_FAULT_SET_HINDEX(hstate_index(h));
5672b43a9990SMike Kravetz 	}
56738382d914SDavidlohr Bueso 
5674c0d0381aSMike Kravetz 	/*
5675c0d0381aSMike Kravetz 	 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
567687bf91d3SMike Kravetz 	 * until finished with ptep.  This serves two purposes:
567787bf91d3SMike Kravetz 	 * 1) It prevents huge_pmd_unshare from being called elsewhere
567887bf91d3SMike Kravetz 	 *    and making the ptep no longer valid.
567987bf91d3SMike Kravetz 	 * 2) It synchronizes us with i_size modifications during truncation.
5680c0d0381aSMike Kravetz 	 *
5681c0d0381aSMike Kravetz 	 * ptep could have already been assigned via huge_pte_offset.  That
5682c0d0381aSMike Kravetz 	 * is OK, as huge_pte_alloc will return the same value unless
5683c0d0381aSMike Kravetz 	 * something has changed.
5684c0d0381aSMike Kravetz 	 */
5685ddeaab32SMike Kravetz 	mapping = vma->vm_file->f_mapping;
5686c0d0381aSMike Kravetz 	i_mmap_lock_read(mapping);
5687aec44e0fSPeter Xu 	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
5688c0d0381aSMike Kravetz 	if (!ptep) {
5689c0d0381aSMike Kravetz 		i_mmap_unlock_read(mapping);
5690c0d0381aSMike Kravetz 		return VM_FAULT_OOM;
5691c0d0381aSMike Kravetz 	}
5692ddeaab32SMike Kravetz 
56933935baa9SDavid Gibson 	/*
56943935baa9SDavid Gibson 	 * Serialize hugepage allocation and instantiation, so that we don't
56953935baa9SDavid Gibson 	 * get spurious allocation failures if two CPUs race to instantiate
56963935baa9SDavid Gibson 	 * the same page in the page cache.
56973935baa9SDavid Gibson 	 */
5698c0d0381aSMike Kravetz 	idx = vma_hugecache_offset(h, vma, haddr);
5699188b04a7SWei Yang 	hash = hugetlb_fault_mutex_hash(mapping, idx);
5700c672c7f2SMike Kravetz 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
57018382d914SDavidlohr Bueso 
57027f2e9525SGerald Schaefer 	entry = huge_ptep_get(ptep);
5703c64e912cSPeter Xu 	/* PTE markers should be handled the same way as none pte */
5704c64e912cSPeter Xu 	if (huge_pte_none_mostly(entry)) {
5705c64e912cSPeter Xu 		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
5706c64e912cSPeter Xu 				      entry, flags);
5707b4d1d99fSDavid Gibson 		goto out_mutex;
57083935baa9SDavid Gibson 	}
570986e5216fSAdam Litke 
571083c54070SNick Piggin 	ret = 0;
57111e8f889bSDavid Gibson 
571257303d80SAndy Whitcroft 	/*
57130f792cf9SNaoya Horiguchi 	 * entry could be a migration/hwpoison entry at this point, so this
57140f792cf9SNaoya Horiguchi 	 * check prevents the kernel from going further below while assuming
57157c8de358SEthon Paul 	 * we have an active hugepage in the pagecache. This goto expects a
57167c8de358SEthon Paul 	 * second page fault, which the is_hugetlb_entry_(migration|hwpoisoned)
57177c8de358SEthon Paul 	 * check above will then handle properly.
57180f792cf9SNaoya Horiguchi 	 */
57190f792cf9SNaoya Horiguchi 	if (!pte_present(entry))
57200f792cf9SNaoya Horiguchi 		goto out_mutex;
57210f792cf9SNaoya Horiguchi 
57220f792cf9SNaoya Horiguchi 	/*
5723c89357e2SDavid Hildenbrand 	 * If we are going to COW/unshare the mapping later, we examine the
5724c89357e2SDavid Hildenbrand 	 * pending reservations for this page now. This will ensure that any
572557303d80SAndy Whitcroft 	 * allocations necessary to record that reservation occur outside the
572657303d80SAndy Whitcroft 	 * spinlock. For private mappings, we also lookup the pagecache
572757303d80SAndy Whitcroft 	 * page now as it is used to determine if a reservation has been
572857303d80SAndy Whitcroft 	 * consumed.
572957303d80SAndy Whitcroft 	 */
5730c89357e2SDavid Hildenbrand 	if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
5731c89357e2SDavid Hildenbrand 	    !huge_pte_write(entry)) {
5732285b8dcaSHuang Ying 		if (vma_needs_reservation(h, vma, haddr) < 0) {
57332b26736cSAndy Whitcroft 			ret = VM_FAULT_OOM;
5734b4d1d99fSDavid Gibson 			goto out_mutex;
57352b26736cSAndy Whitcroft 		}
57365e911373SMike Kravetz 		/* Just decrements count, does not deallocate */
5737285b8dcaSHuang Ying 		vma_end_reservation(h, vma, haddr);
573857303d80SAndy Whitcroft 
5739f83a275dSMel Gorman 		if (!(vma->vm_flags & VM_MAYSHARE))
574057303d80SAndy Whitcroft 			pagecache_page = hugetlbfs_pagecache_page(h,
5741285b8dcaSHuang Ying 								vma, haddr);
574257303d80SAndy Whitcroft 	}
574357303d80SAndy Whitcroft 
57440f792cf9SNaoya Horiguchi 	ptl = huge_pte_lock(h, mm, ptep);
57450fe6e20bSNaoya Horiguchi 
5746c89357e2SDavid Hildenbrand 	/* Check for a racing update before calling hugetlb_wp() */
5747b4d1d99fSDavid Gibson 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
5748cb900f41SKirill A. Shutemov 		goto out_ptl;
5749b4d1d99fSDavid Gibson 
5750166f3eccSPeter Xu 	/* Handle userfault-wp first, before trying to lock more pages */
5751166f3eccSPeter Xu 	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
5752166f3eccSPeter Xu 	    (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
5753166f3eccSPeter Xu 		struct vm_fault vmf = {
5754166f3eccSPeter Xu 			.vma = vma,
5755166f3eccSPeter Xu 			.address = haddr,
5756166f3eccSPeter Xu 			.real_address = address,
5757166f3eccSPeter Xu 			.flags = flags,
5758166f3eccSPeter Xu 		};
5759166f3eccSPeter Xu 
5760166f3eccSPeter Xu 		spin_unlock(ptl);
5761166f3eccSPeter Xu 		if (pagecache_page) {
5762166f3eccSPeter Xu 			unlock_page(pagecache_page);
5763166f3eccSPeter Xu 			put_page(pagecache_page);
5764166f3eccSPeter Xu 		}
5765166f3eccSPeter Xu 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5766166f3eccSPeter Xu 		i_mmap_unlock_read(mapping);
5767166f3eccSPeter Xu 		return handle_userfault(&vmf, VM_UFFD_WP);
5768166f3eccSPeter Xu 	}
5769166f3eccSPeter Xu 
57700f792cf9SNaoya Horiguchi 	/*
5771c89357e2SDavid Hildenbrand 	 * hugetlb_wp() requires page locks of pte_page(entry) and
57720f792cf9SNaoya Horiguchi 	 * pagecache_page, so here we need to take the former one
57730f792cf9SNaoya Horiguchi 	 * when page != pagecache_page or !pagecache_page.
57740f792cf9SNaoya Horiguchi 	 */
57750f792cf9SNaoya Horiguchi 	page = pte_page(entry);
57760f792cf9SNaoya Horiguchi 	if (page != pagecache_page)
57770f792cf9SNaoya Horiguchi 		if (!trylock_page(page)) {
57780f792cf9SNaoya Horiguchi 			need_wait_lock = 1;
57790f792cf9SNaoya Horiguchi 			goto out_ptl;
57800f792cf9SNaoya Horiguchi 		}
57810f792cf9SNaoya Horiguchi 
57820f792cf9SNaoya Horiguchi 	get_page(page);
5783b4d1d99fSDavid Gibson 
5784c89357e2SDavid Hildenbrand 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
5785106c992aSGerald Schaefer 		if (!huge_pte_write(entry)) {
5786c89357e2SDavid Hildenbrand 			ret = hugetlb_wp(mm, vma, address, ptep, flags,
5787cb900f41SKirill A. Shutemov 					 pagecache_page, ptl);
57880f792cf9SNaoya Horiguchi 			goto out_put_page;
5789c89357e2SDavid Hildenbrand 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
5790106c992aSGerald Schaefer 			entry = huge_pte_mkdirty(entry);
5791b4d1d99fSDavid Gibson 		}
5792c89357e2SDavid Hildenbrand 	}
5793b4d1d99fSDavid Gibson 	entry = pte_mkyoung(entry);
5794285b8dcaSHuang Ying 	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
5795788c7df4SHugh Dickins 						flags & FAULT_FLAG_WRITE))
5796285b8dcaSHuang Ying 		update_mmu_cache(vma, haddr, ptep);
57970f792cf9SNaoya Horiguchi out_put_page:
57980f792cf9SNaoya Horiguchi 	if (page != pagecache_page)
57990f792cf9SNaoya Horiguchi 		unlock_page(page);
58000f792cf9SNaoya Horiguchi 	put_page(page);
5801cb900f41SKirill A. Shutemov out_ptl:
5802cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
580357303d80SAndy Whitcroft 
580457303d80SAndy Whitcroft 	if (pagecache_page) {
580557303d80SAndy Whitcroft 		unlock_page(pagecache_page);
580657303d80SAndy Whitcroft 		put_page(pagecache_page);
580757303d80SAndy Whitcroft 	}
5808b4d1d99fSDavid Gibson out_mutex:
5809c672c7f2SMike Kravetz 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5810c0d0381aSMike Kravetz 	i_mmap_unlock_read(mapping);
58110f792cf9SNaoya Horiguchi 	/*
58120f792cf9SNaoya Horiguchi 	 * Generally it's safe to hold a refcount while waiting for a page lock.
58130f792cf9SNaoya Horiguchi 	 * But here we only wait to defer the next page fault and avoid a busy
58140f792cf9SNaoya Horiguchi 	 * loop; the page is not used after it is unlocked and before we return
58150f792cf9SNaoya Horiguchi 	 * from the current page fault. So we are safe from accessing a freed
58160f792cf9SNaoya Horiguchi 	 * page, even though we wait here without taking a refcount.
58170f792cf9SNaoya Horiguchi 	 */
58180f792cf9SNaoya Horiguchi 	if (need_wait_lock)
58190f792cf9SNaoya Horiguchi 		wait_on_page_locked(page);
58201e8f889bSDavid Gibson 	return ret;
582186e5216fSAdam Litke }
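/*
 * Locking summary (editorial): hugetlb_fault() takes i_mmap_rwsem in read
 * mode, then the per-(mapping, idx) fault mutex, then the page table lock;
 * the pagecache page (if any) is locked before the ptl and the pte page is
 * only trylock'ed under it.  Everything is released in reverse order on
 * the exit paths.
 */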
582286e5216fSAdam Litke 
5823714c1891SAxel Rasmussen #ifdef CONFIG_USERFAULTFD
58248fb5debcSMike Kravetz /*
58258fb5debcSMike Kravetz  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
58268fb5debcSMike Kravetz  * modifications for huge pages.
58278fb5debcSMike Kravetz  */
58288fb5debcSMike Kravetz int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
58298fb5debcSMike Kravetz 			    pte_t *dst_pte,
58308fb5debcSMike Kravetz 			    struct vm_area_struct *dst_vma,
58318fb5debcSMike Kravetz 			    unsigned long dst_addr,
58328fb5debcSMike Kravetz 			    unsigned long src_addr,
5833f6191471SAxel Rasmussen 			    enum mcopy_atomic_mode mode,
58346041c691SPeter Xu 			    struct page **pagep,
58356041c691SPeter Xu 			    bool wp_copy)
58368fb5debcSMike Kravetz {
5837f6191471SAxel Rasmussen 	bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
58388cc5fcbbSMina Almasry 	struct hstate *h = hstate_vma(dst_vma);
58398cc5fcbbSMina Almasry 	struct address_space *mapping = dst_vma->vm_file->f_mapping;
58408cc5fcbbSMina Almasry 	pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
58411e392147SAndrea Arcangeli 	unsigned long size;
58421c9e8defSMike Kravetz 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
58438fb5debcSMike Kravetz 	pte_t _dst_pte;
58448fb5debcSMike Kravetz 	spinlock_t *ptl;
58458cc5fcbbSMina Almasry 	int ret = -ENOMEM;
58468fb5debcSMike Kravetz 	struct page *page;
5847f6191471SAxel Rasmussen 	int writable;
5848cc30042dSMina Almasry 	bool page_in_pagecache = false;
58498fb5debcSMike Kravetz 
5850f6191471SAxel Rasmussen 	if (is_continue) {
5851f6191471SAxel Rasmussen 		ret = -EFAULT;
5852f6191471SAxel Rasmussen 		page = find_lock_page(mapping, idx);
5853f6191471SAxel Rasmussen 		if (!page)
5854f6191471SAxel Rasmussen 			goto out;
5855cc30042dSMina Almasry 		page_in_pagecache = true;
5856f6191471SAxel Rasmussen 	} else if (!*pagep) {
5857d84cf06eSMina Almasry 		/* If a page already exists, then it's UFFDIO_COPY for
5858d84cf06eSMina Almasry 		 * a non-missing case. Return -EEXIST.
5859d84cf06eSMina Almasry 		 */
5860d84cf06eSMina Almasry 		if (vm_shared &&
5861d84cf06eSMina Almasry 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
5862d84cf06eSMina Almasry 			ret = -EEXIST;
58638fb5debcSMike Kravetz 			goto out;
5864d84cf06eSMina Almasry 		}
5865d84cf06eSMina Almasry 
5866d84cf06eSMina Almasry 		page = alloc_huge_page(dst_vma, dst_addr, 0);
5867d84cf06eSMina Almasry 		if (IS_ERR(page)) {
5868d84cf06eSMina Almasry 			ret = -ENOMEM;
5869d84cf06eSMina Almasry 			goto out;
5870d84cf06eSMina Almasry 		}
58718fb5debcSMike Kravetz 
58728fb5debcSMike Kravetz 		ret = copy_huge_page_from_user(page,
58738fb5debcSMike Kravetz 						(const void __user *) src_addr,
5874810a56b9SMike Kravetz 						pages_per_huge_page(h), false);
58758fb5debcSMike Kravetz 
5876c1e8d7c6SMichel Lespinasse 		/* fallback to copy_from_user outside mmap_lock */
58778fb5debcSMike Kravetz 		if (unlikely(ret)) {
58789e368259SAndrea Arcangeli 			ret = -ENOENT;
58798cc5fcbbSMina Almasry 			/* Free the allocated page which may have
58808cc5fcbbSMina Almasry 			 * consumed a reservation.
58818cc5fcbbSMina Almasry 			 */
58828cc5fcbbSMina Almasry 			restore_reserve_on_error(h, dst_vma, dst_addr, page);
58838cc5fcbbSMina Almasry 			put_page(page);
58848cc5fcbbSMina Almasry 
58858cc5fcbbSMina Almasry 			/* Allocate a temporary page to hold the copied
58868cc5fcbbSMina Almasry 			 * contents.
58878cc5fcbbSMina Almasry 			 */
58888cc5fcbbSMina Almasry 			page = alloc_huge_page_vma(h, dst_vma, dst_addr);
58898cc5fcbbSMina Almasry 			if (!page) {
58908cc5fcbbSMina Almasry 				ret = -ENOMEM;
58918cc5fcbbSMina Almasry 				goto out;
58928cc5fcbbSMina Almasry 			}
58938fb5debcSMike Kravetz 			*pagep = page;
58948cc5fcbbSMina Almasry 			/* Set the outparam pagep and return to the caller to
58958cc5fcbbSMina Almasry 			 * copy the contents outside the lock. Don't free the
58968cc5fcbbSMina Almasry 			 * page.
58978cc5fcbbSMina Almasry 			 */
58988fb5debcSMike Kravetz 			goto out;
58998fb5debcSMike Kravetz 		}
59008fb5debcSMike Kravetz 	} else {
59018cc5fcbbSMina Almasry 		if (vm_shared &&
59028cc5fcbbSMina Almasry 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
59038cc5fcbbSMina Almasry 			put_page(*pagep);
59048cc5fcbbSMina Almasry 			ret = -EEXIST;
59058cc5fcbbSMina Almasry 			*pagep = NULL;
59068cc5fcbbSMina Almasry 			goto out;
59078cc5fcbbSMina Almasry 		}
59088cc5fcbbSMina Almasry 
59098cc5fcbbSMina Almasry 		page = alloc_huge_page(dst_vma, dst_addr, 0);
59108cc5fcbbSMina Almasry 		if (IS_ERR(page)) {
59118cc5fcbbSMina Almasry 			ret = -ENOMEM;
59128cc5fcbbSMina Almasry 			*pagep = NULL;
59138cc5fcbbSMina Almasry 			goto out;
59148cc5fcbbSMina Almasry 		}
591534892366SMuchun Song 		copy_user_huge_page(page, *pagep, dst_addr, dst_vma,
591634892366SMuchun Song 				    pages_per_huge_page(h));
59178cc5fcbbSMina Almasry 		put_page(*pagep);
59188fb5debcSMike Kravetz 		*pagep = NULL;
59198fb5debcSMike Kravetz 	}
59208fb5debcSMike Kravetz 
59218fb5debcSMike Kravetz 	/*
59228fb5debcSMike Kravetz 	 * The memory barrier inside __SetPageUptodate makes sure that
59238fb5debcSMike Kravetz 	 * preceding stores to the page contents become visible before
59248fb5debcSMike Kravetz 	 * the set_pte_at() write.
59258fb5debcSMike Kravetz 	 */
59268fb5debcSMike Kravetz 	__SetPageUptodate(page);
59278fb5debcSMike Kravetz 
5928f6191471SAxel Rasmussen 	/* Add shared, newly allocated pages to the page cache. */
5929f6191471SAxel Rasmussen 	if (vm_shared && !is_continue) {
59301e392147SAndrea Arcangeli 		size = i_size_read(mapping->host) >> huge_page_shift(h);
59311e392147SAndrea Arcangeli 		ret = -EFAULT;
59321e392147SAndrea Arcangeli 		if (idx >= size)
59331e392147SAndrea Arcangeli 			goto out_release_nounlock;
59341c9e8defSMike Kravetz 
59351e392147SAndrea Arcangeli 		/*
59361e392147SAndrea Arcangeli 		 * Serialization between remove_inode_hugepages() and
59371e392147SAndrea Arcangeli 		 * huge_add_to_page_cache() below happens through the
59381e392147SAndrea Arcangeli 		 * hugetlb_fault_mutex_table, which must be held here by
59391e392147SAndrea Arcangeli 		 * the caller.
59401e392147SAndrea Arcangeli 		 */
59411c9e8defSMike Kravetz 		ret = huge_add_to_page_cache(page, mapping, idx);
59421c9e8defSMike Kravetz 		if (ret)
59431c9e8defSMike Kravetz 			goto out_release_nounlock;
5944cc30042dSMina Almasry 		page_in_pagecache = true;
59451c9e8defSMike Kravetz 	}
59461c9e8defSMike Kravetz 
59478fb5debcSMike Kravetz 	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
59488fb5debcSMike Kravetz 	spin_lock(ptl);
59498fb5debcSMike Kravetz 
59501e392147SAndrea Arcangeli 	/*
59511e392147SAndrea Arcangeli 	 * Recheck the i_size after holding PT lock to make sure not
59521e392147SAndrea Arcangeli 	 * to leave any page mapped (as page_mapped()) beyond the end
59531e392147SAndrea Arcangeli 	 * of the i_size (remove_inode_hugepages() is strict about
59541e392147SAndrea Arcangeli 	 * enforcing that). If we bail out here, we'll also leave a
59551e392147SAndrea Arcangeli 	 * page in the radix tree in the vm_shared case beyond the end
59561e392147SAndrea Arcangeli 	 * of the i_size, but remove_inode_hugepages() will take care
59571e392147SAndrea Arcangeli 	 * of it as soon as we drop the hugetlb_fault_mutex_table.
59581e392147SAndrea Arcangeli 	 */
59591e392147SAndrea Arcangeli 	size = i_size_read(mapping->host) >> huge_page_shift(h);
59601e392147SAndrea Arcangeli 	ret = -EFAULT;
59611e392147SAndrea Arcangeli 	if (idx >= size)
59621e392147SAndrea Arcangeli 		goto out_release_unlock;
59631e392147SAndrea Arcangeli 
59648fb5debcSMike Kravetz 	ret = -EEXIST;
59656041c691SPeter Xu 	/*
59666041c691SPeter Xu 	 * We allow overwriting a pte marker: consider when both MISSING|WP are
59676041c691SPeter Xu 	 * registered; we first wr-protect a none pte which has no page cache
59686041c691SPeter Xu 	 * page backing it, then access the page.
59696041c691SPeter Xu 	 */
59706041c691SPeter Xu 	if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
59718fb5debcSMike Kravetz 		goto out_release_unlock;
59728fb5debcSMike Kravetz 
59731c9e8defSMike Kravetz 	if (vm_shared) {
5974fb3d824dSDavid Hildenbrand 		page_dup_file_rmap(page, true);
59751c9e8defSMike Kravetz 	} else {
5976d6995da3SMike Kravetz 		ClearHPageRestoreReserve(page);
59778fb5debcSMike Kravetz 		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
59781c9e8defSMike Kravetz 	}
59798fb5debcSMike Kravetz 
59806041c691SPeter Xu 	/*
59816041c691SPeter Xu 	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
59826041c691SPeter Xu 	 * with wp flag set, don't set pte write bit.
59836041c691SPeter Xu 	 */
59846041c691SPeter Xu 	if (wp_copy || (is_continue && !vm_shared))
5985f6191471SAxel Rasmussen 		writable = 0;
5986f6191471SAxel Rasmussen 	else
5987f6191471SAxel Rasmussen 		writable = dst_vma->vm_flags & VM_WRITE;
5988f6191471SAxel Rasmussen 
5989f6191471SAxel Rasmussen 	_dst_pte = make_huge_pte(dst_vma, page, writable);
59906041c691SPeter Xu 	/*
59916041c691SPeter Xu 	 * Always mark the UFFDIO_COPY page dirty; note that this may not be
59926041c691SPeter Xu 	 * terribly important for hugetlbfs for now since swapping is not
59936041c691SPeter Xu 	 * supported, but we should still make clear that this page cannot be
59946041c691SPeter Xu 	 * thrown away at will, even if the write bit is not set.
59956041c691SPeter Xu 	 */
59968fb5debcSMike Kravetz 	_dst_pte = huge_pte_mkdirty(_dst_pte);
59978fb5debcSMike Kravetz 	_dst_pte = pte_mkyoung(_dst_pte);
59988fb5debcSMike Kravetz 
59996041c691SPeter Xu 	if (wp_copy)
60006041c691SPeter Xu 		_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
60016041c691SPeter Xu 
60028fb5debcSMike Kravetz 	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
60038fb5debcSMike Kravetz 
60048fb5debcSMike Kravetz 	(void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
60058fb5debcSMike Kravetz 					dst_vma->vm_flags & VM_WRITE);
60068fb5debcSMike Kravetz 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
60078fb5debcSMike Kravetz 
60088fb5debcSMike Kravetz 	/* No need to invalidate - it was non-present before */
60098fb5debcSMike Kravetz 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
60108fb5debcSMike Kravetz 
60118fb5debcSMike Kravetz 	spin_unlock(ptl);
6012f6191471SAxel Rasmussen 	if (!is_continue)
60138f251a3dSMike Kravetz 		SetHPageMigratable(page);
6014f6191471SAxel Rasmussen 	if (vm_shared || is_continue)
60151c9e8defSMike Kravetz 		unlock_page(page);
60168fb5debcSMike Kravetz 	ret = 0;
60178fb5debcSMike Kravetz out:
60188fb5debcSMike Kravetz 	return ret;
60198fb5debcSMike Kravetz out_release_unlock:
60208fb5debcSMike Kravetz 	spin_unlock(ptl);
6021f6191471SAxel Rasmussen 	if (vm_shared || is_continue)
60221c9e8defSMike Kravetz 		unlock_page(page);
60235af10dfdSAndrea Arcangeli out_release_nounlock:
6024cc30042dSMina Almasry 	if (!page_in_pagecache)
6025846be085SMike Kravetz 		restore_reserve_on_error(h, dst_vma, dst_addr, page);
60268fb5debcSMike Kravetz 	put_page(page);
60278fb5debcSMike Kravetz 	goto out;
60288fb5debcSMike Kravetz }
6029714c1891SAxel Rasmussen #endif /* CONFIG_USERFAULTFD */
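/*
 * Editorial note on hugetlb_mcopy_atomic_pte() return values: -ENOENT with
 * *pagep set asks the caller to copy the user data outside mmap_lock and
 * retry with that page; -EEXIST means the destination already has a page
 * cache page or a non-none pte.
 */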
60308fb5debcSMike Kravetz 
603182e5d378SJoao Martins static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
603282e5d378SJoao Martins 				 int refs, struct page **pages,
603382e5d378SJoao Martins 				 struct vm_area_struct **vmas)
603482e5d378SJoao Martins {
603582e5d378SJoao Martins 	int nr;
603682e5d378SJoao Martins 
603782e5d378SJoao Martins 	for (nr = 0; nr < refs; nr++) {
603882e5d378SJoao Martins 		if (likely(pages))
603982e5d378SJoao Martins 			pages[nr] = mem_map_offset(page, nr);
604082e5d378SJoao Martins 		if (vmas)
604182e5d378SJoao Martins 			vmas[nr] = vma;
604282e5d378SJoao Martins 	}
604382e5d378SJoao Martins }
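/*
 * Editorial note: mem_map_offset() is used instead of plain pointer
 * arithmetic so subpages resolve correctly even for gigantic pages whose
 * struct pages may not be contiguous in the mem_map.
 */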
604482e5d378SJoao Martins 
6045a7f22660SDavid Hildenbrand static inline bool __follow_hugetlb_must_fault(unsigned int flags, pte_t *pte,
6046a7f22660SDavid Hildenbrand 					       bool *unshare)
6047a7f22660SDavid Hildenbrand {
6048a7f22660SDavid Hildenbrand 	pte_t pteval = huge_ptep_get(pte);
6049a7f22660SDavid Hildenbrand 
6050a7f22660SDavid Hildenbrand 	*unshare = false;
6051a7f22660SDavid Hildenbrand 	if (is_swap_pte(pteval))
6052a7f22660SDavid Hildenbrand 		return true;
6053a7f22660SDavid Hildenbrand 	if (huge_pte_write(pteval))
6054a7f22660SDavid Hildenbrand 		return false;
6055a7f22660SDavid Hildenbrand 	if (flags & FOLL_WRITE)
6056a7f22660SDavid Hildenbrand 		return true;
6057a7f22660SDavid Hildenbrand 	if (gup_must_unshare(flags, pte_page(pteval))) {
6058a7f22660SDavid Hildenbrand 		*unshare = true;
6059a7f22660SDavid Hildenbrand 		return true;
6060a7f22660SDavid Hildenbrand 	}
6061a7f22660SDavid Hildenbrand 	return false;
6062a7f22660SDavid Hildenbrand }
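/*
 * Decision summary (editorial): a fault is required when the pte is a swap
 * (non-present) entry, when FOLL_WRITE is requested but the pte is not
 * writable, or when GUP must unshare the page, in which case *unshare is
 * set so the caller adds FAULT_FLAG_UNSHARE.
 */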
6063a7f22660SDavid Hildenbrand 
606428a35716SMichel Lespinasse long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
606563551ae0SDavid Gibson 			 struct page **pages, struct vm_area_struct **vmas,
606628a35716SMichel Lespinasse 			 unsigned long *position, unsigned long *nr_pages,
60674f6da934SPeter Xu 			 long i, unsigned int flags, int *locked)
606863551ae0SDavid Gibson {
6069d5d4b0aaSChen, Kenneth W 	unsigned long pfn_offset;
6070d5d4b0aaSChen, Kenneth W 	unsigned long vaddr = *position;
607128a35716SMichel Lespinasse 	unsigned long remainder = *nr_pages;
6072a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
60730fa5bc40SJoao Martins 	int err = -EFAULT, refs;
607463551ae0SDavid Gibson 
607563551ae0SDavid Gibson 	while (vaddr < vma->vm_end && remainder) {
607663551ae0SDavid Gibson 		pte_t *pte;
6077cb900f41SKirill A. Shutemov 		spinlock_t *ptl = NULL;
6078a7f22660SDavid Hildenbrand 		bool unshare = false;
60792a15efc9SHugh Dickins 		int absent;
608063551ae0SDavid Gibson 		struct page *page;
608163551ae0SDavid Gibson 
60824c887265SAdam Litke 		/*
608302057967SDavid Rientjes 		 * If we have a pending SIGKILL, don't keep faulting pages and
608402057967SDavid Rientjes 		 * potentially allocating memory.
608502057967SDavid Rientjes 		 */
6086fa45f116SDavidlohr Bueso 		if (fatal_signal_pending(current)) {
608702057967SDavid Rientjes 			remainder = 0;
608802057967SDavid Rientjes 			break;
608902057967SDavid Rientjes 		}
609002057967SDavid Rientjes 
609102057967SDavid Rientjes 		/*
60924c887265SAdam Litke 		 * Some archs (sparc64, sh*) have multiple pte_ts to
60932a15efc9SHugh Dickins 		 * each hugepage.  We have to make sure we get the
60944c887265SAdam Litke 		 * first, for the page indexing below to work.
6095cb900f41SKirill A. Shutemov 		 *
6096cb900f41SKirill A. Shutemov 		 * Note that page table lock is not held when pte is null.
60974c887265SAdam Litke 		 */
60987868a208SPunit Agrawal 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
60997868a208SPunit Agrawal 				      huge_page_size(h));
6100cb900f41SKirill A. Shutemov 		if (pte)
6101cb900f41SKirill A. Shutemov 			ptl = huge_pte_lock(h, mm, pte);
61022a15efc9SHugh Dickins 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
610363551ae0SDavid Gibson 
61042a15efc9SHugh Dickins 		/*
61052a15efc9SHugh Dickins 		 * When coredumping, it suits get_dump_page if we just return
61063ae77f43SHugh Dickins 		 * an error where there's an empty slot with no huge pagecache
61073ae77f43SHugh Dickins 		 * to back it.  This way, we avoid allocating a hugepage, and
61083ae77f43SHugh Dickins 		 * the sparse dumpfile avoids allocating disk blocks, but its
61093ae77f43SHugh Dickins 		 * huge holes still show up with zeroes where they need to be.
61102a15efc9SHugh Dickins 		 */
61113ae77f43SHugh Dickins 		if (absent && (flags & FOLL_DUMP) &&
61123ae77f43SHugh Dickins 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
6113cb900f41SKirill A. Shutemov 			if (pte)
6114cb900f41SKirill A. Shutemov 				spin_unlock(ptl);
61152a15efc9SHugh Dickins 			remainder = 0;
61162a15efc9SHugh Dickins 			break;
61172a15efc9SHugh Dickins 		}
61182a15efc9SHugh Dickins 
61199cc3a5bdSNaoya Horiguchi 		/*
61209cc3a5bdSNaoya Horiguchi 		 * We need to call hugetlb_fault both for hugepages under migration
61219cc3a5bdSNaoya Horiguchi 		 * (in which case hugetlb_fault waits for the migration) and for
61229cc3a5bdSNaoya Horiguchi 		 * hwpoisoned hugepages (in which case we need to prevent the
61239cc3a5bdSNaoya Horiguchi 		 * caller from accessing them). To do this, we use is_swap_pte
61249cc3a5bdSNaoya Horiguchi 		 * here instead of is_hugetlb_entry_migration and
61259cc3a5bdSNaoya Horiguchi 		 * is_hugetlb_entry_hwpoisoned: it simply covers both cases, and
61269cc3a5bdSNaoya Horiguchi 		 * we can't follow correct pages directly from any kind of swap
61279cc3a5bdSNaoya Horiguchi 		 * entry anyway.
61289cc3a5bdSNaoya Horiguchi 		 */
6129a7f22660SDavid Hildenbrand 		if (absent ||
6130a7f22660SDavid Hildenbrand 		    __follow_hugetlb_must_fault(flags, pte, &unshare)) {
61312b740303SSouptick Joarder 			vm_fault_t ret;
613287ffc118SAndrea Arcangeli 			unsigned int fault_flags = 0;
61334c887265SAdam Litke 
6134cb900f41SKirill A. Shutemov 			if (pte)
6135cb900f41SKirill A. Shutemov 				spin_unlock(ptl);
613687ffc118SAndrea Arcangeli 			if (flags & FOLL_WRITE)
613787ffc118SAndrea Arcangeli 				fault_flags |= FAULT_FLAG_WRITE;
6138a7f22660SDavid Hildenbrand 			else if (unshare)
6139a7f22660SDavid Hildenbrand 				fault_flags |= FAULT_FLAG_UNSHARE;
61404f6da934SPeter Xu 			if (locked)
614171335f37SPeter Xu 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
614271335f37SPeter Xu 					FAULT_FLAG_KILLABLE;
614387ffc118SAndrea Arcangeli 			if (flags & FOLL_NOWAIT)
614487ffc118SAndrea Arcangeli 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
614587ffc118SAndrea Arcangeli 					FAULT_FLAG_RETRY_NOWAIT;
614687ffc118SAndrea Arcangeli 			if (flags & FOLL_TRIED) {
61474426e945SPeter Xu 				/*
61484426e945SPeter Xu 				 * Note: FAULT_FLAG_ALLOW_RETRY and
61494426e945SPeter Xu 				 * FAULT_FLAG_TRIED can co-exist
61504426e945SPeter Xu 				 */
615187ffc118SAndrea Arcangeli 				fault_flags |= FAULT_FLAG_TRIED;
615287ffc118SAndrea Arcangeli 			}
615387ffc118SAndrea Arcangeli 			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
615487ffc118SAndrea Arcangeli 			if (ret & VM_FAULT_ERROR) {
61552be7cfedSDaniel Jordan 				err = vm_fault_to_errno(ret, flags);
61561c59827dSHugh Dickins 				remainder = 0;
61571c59827dSHugh Dickins 				break;
61581c59827dSHugh Dickins 			}
615987ffc118SAndrea Arcangeli 			if (ret & VM_FAULT_RETRY) {
61604f6da934SPeter Xu 				if (locked &&
61611ac25013SAndrea Arcangeli 				    !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
61624f6da934SPeter Xu 					*locked = 0;
616387ffc118SAndrea Arcangeli 				*nr_pages = 0;
616487ffc118SAndrea Arcangeli 				/*
616587ffc118SAndrea Arcangeli 				 * VM_FAULT_RETRY must not return an
616687ffc118SAndrea Arcangeli 				 * error, it will return zero
616787ffc118SAndrea Arcangeli 				 * instead.
616887ffc118SAndrea Arcangeli 				 *
616987ffc118SAndrea Arcangeli 				 * No need to update "position" as the
617087ffc118SAndrea Arcangeli 				 * caller will not check it after
617187ffc118SAndrea Arcangeli 				 * *nr_pages is set to 0.
617287ffc118SAndrea Arcangeli 				 */
617387ffc118SAndrea Arcangeli 				return i;
617487ffc118SAndrea Arcangeli 			}
617587ffc118SAndrea Arcangeli 			continue;
617687ffc118SAndrea Arcangeli 		}
617763551ae0SDavid Gibson 
6178a5516438SAndi Kleen 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
61797f2e9525SGerald Schaefer 		page = pte_page(huge_ptep_get(pte));
61808fde12caSLinus Torvalds 
6181b6a2619cSDavid Hildenbrand 		VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
6182b6a2619cSDavid Hildenbrand 			       !PageAnonExclusive(page), page);
6183b6a2619cSDavid Hildenbrand 
61848fde12caSLinus Torvalds 		/*
6185acbfb087SZhigang Lu 		 * If subpage information is not requested, update counters
6186acbfb087SZhigang Lu 		 * and skip the same_page loop below.
6187acbfb087SZhigang Lu 		 */
6188acbfb087SZhigang Lu 		if (!pages && !vmas && !pfn_offset &&
6189acbfb087SZhigang Lu 		    (vaddr + huge_page_size(h) < vma->vm_end) &&
6190acbfb087SZhigang Lu 		    (remainder >= pages_per_huge_page(h))) {
6191acbfb087SZhigang Lu 			vaddr += huge_page_size(h);
6192acbfb087SZhigang Lu 			remainder -= pages_per_huge_page(h);
6193acbfb087SZhigang Lu 			i += pages_per_huge_page(h);
6194acbfb087SZhigang Lu 			spin_unlock(ptl);
6195acbfb087SZhigang Lu 			continue;
6196acbfb087SZhigang Lu 		}
6197acbfb087SZhigang Lu 
6198d08af0a5SJoao Martins 		/* vaddr may not be aligned to PAGE_SIZE */
6199d08af0a5SJoao Martins 		refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
6200d08af0a5SJoao Martins 		    (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
62010fa5bc40SJoao Martins 
620282e5d378SJoao Martins 		if (pages || vmas)
620382e5d378SJoao Martins 			record_subpages_vmas(mem_map_offset(page, pfn_offset),
620482e5d378SJoao Martins 					     vma, refs,
620582e5d378SJoao Martins 					     likely(pages) ? pages + i : NULL,
620682e5d378SJoao Martins 					     vmas ? vmas + i : NULL);
620763551ae0SDavid Gibson 
620882e5d378SJoao Martins 		if (pages) {
62090fa5bc40SJoao Martins 			/*
6210822951d8SMatthew Wilcox (Oracle) 			 * try_grab_folio() should always succeed here,
62110fa5bc40SJoao Martins 			 * because: a) we hold the ptl lock, and b) we've just
62120fa5bc40SJoao Martins 			 * checked that the huge page is present in the page
62130fa5bc40SJoao Martins 			 * tables. If the huge page is present, then the tail
62140fa5bc40SJoao Martins 			 * pages must also be present. The ptl prevents the
62150fa5bc40SJoao Martins 			 * head page and tail pages from being rearranged in
62160fa5bc40SJoao Martins 			 * any way. So this page must be available at this
62170fa5bc40SJoao Martins 			 * point, unless the page refcount overflowed:
62180fa5bc40SJoao Martins 			 */
6219822951d8SMatthew Wilcox (Oracle) 			if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs,
62200fa5bc40SJoao Martins 							 flags))) {
62210fa5bc40SJoao Martins 				spin_unlock(ptl);
62220fa5bc40SJoao Martins 				remainder = 0;
62230fa5bc40SJoao Martins 				err = -ENOMEM;
62240fa5bc40SJoao Martins 				break;
62250fa5bc40SJoao Martins 			}
6226d5d4b0aaSChen, Kenneth W 		}
622782e5d378SJoao Martins 
622882e5d378SJoao Martins 		vaddr += (refs << PAGE_SHIFT);
622982e5d378SJoao Martins 		remainder -= refs;
623082e5d378SJoao Martins 		i += refs;
623182e5d378SJoao Martins 
6232cb900f41SKirill A. Shutemov 		spin_unlock(ptl);
623363551ae0SDavid Gibson 	}
623428a35716SMichel Lespinasse 	*nr_pages = remainder;
623587ffc118SAndrea Arcangeli 	/*
623687ffc118SAndrea Arcangeli 	 * setting position is actually required only if remainder is
623787ffc118SAndrea Arcangeli 	 * not zero, but it's faster not to add an "if (remainder)"
623887ffc118SAndrea Arcangeli 	 * branch.
623987ffc118SAndrea Arcangeli 	 */
624063551ae0SDavid Gibson 	*position = vaddr;
624163551ae0SDavid Gibson 
62422be7cfedSDaniel Jordan 	return i ? i : err;
624363551ae0SDavid Gibson }
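/*
 * Editorial note: follow_hugetlb_page() returns the number of pages
 * processed (i), or an error code if no progress was made; on
 * VM_FAULT_RETRY it returns i immediately with *nr_pages set to 0 so the
 * caller does not consult *position.
 */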
62448f860591SZhang, Yanmin 
62457da4d641SPeter Zijlstra unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
62465a90d5a1SPeter Xu 		unsigned long address, unsigned long end,
62475a90d5a1SPeter Xu 		pgprot_t newprot, unsigned long cp_flags)
62488f860591SZhang, Yanmin {
62498f860591SZhang, Yanmin 	struct mm_struct *mm = vma->vm_mm;
62508f860591SZhang, Yanmin 	unsigned long start = address;
62518f860591SZhang, Yanmin 	pte_t *ptep;
62528f860591SZhang, Yanmin 	pte_t pte;
6253a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
6254*60dfaad6SPeter Xu 	unsigned long pages = 0, psize = huge_page_size(h);
6255dff11abeSMike Kravetz 	bool shared_pmd = false;
6256ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
62575a90d5a1SPeter Xu 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
62585a90d5a1SPeter Xu 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6259dff11abeSMike Kravetz 
6260dff11abeSMike Kravetz 	/*
6261dff11abeSMike Kravetz 	 * In the case of shared PMDs, the area to flush could be beyond
6262ac46d4f3SJérôme Glisse 	 * start/end.  Set range.start/range.end to cover the maximum possible
6263dff11abeSMike Kravetz 	 * range if PMD sharing is possible.
6264dff11abeSMike Kravetz 	 */
62657269f999SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
62667269f999SJérôme Glisse 				0, vma, mm, start, end);
6267ac46d4f3SJérôme Glisse 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
62688f860591SZhang, Yanmin 
62698f860591SZhang, Yanmin 	BUG_ON(address >= end);
6270ac46d4f3SJérôme Glisse 	flush_cache_range(vma, range.start, range.end);
62718f860591SZhang, Yanmin 
6272ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
627383cde9e8SDavidlohr Bueso 	i_mmap_lock_write(vma->vm_file->f_mapping);
6274*60dfaad6SPeter Xu 	for (; address < end; address += psize) {
6275cb900f41SKirill A. Shutemov 		spinlock_t *ptl;
6276*60dfaad6SPeter Xu 		ptep = huge_pte_offset(mm, address, psize);
62778f860591SZhang, Yanmin 		if (!ptep)
62788f860591SZhang, Yanmin 			continue;
6279cb900f41SKirill A. Shutemov 		ptl = huge_pte_lock(h, mm, ptep);
628034ae204fSMike Kravetz 		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
6281*60dfaad6SPeter Xu 			/*
6282*60dfaad6SPeter Xu 			 * When uffd-wp is enabled on the vma, unshare
6283*60dfaad6SPeter Xu 			 * shouldn't happen at all.  Warn if it somehow
6284*60dfaad6SPeter Xu 			 * happens anyway.
6285*60dfaad6SPeter Xu 			 */
6286*60dfaad6SPeter Xu 			WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
62877da4d641SPeter Zijlstra 			pages++;
6288cb900f41SKirill A. Shutemov 			spin_unlock(ptl);
6289dff11abeSMike Kravetz 			shared_pmd = true;
629039dde65cSChen, Kenneth W 			continue;
62917da4d641SPeter Zijlstra 		}
6292a8bda28dSNaoya Horiguchi 		pte = huge_ptep_get(ptep);
6293a8bda28dSNaoya Horiguchi 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
6294a8bda28dSNaoya Horiguchi 			spin_unlock(ptl);
6295a8bda28dSNaoya Horiguchi 			continue;
6296a8bda28dSNaoya Horiguchi 		}
6297a8bda28dSNaoya Horiguchi 		if (unlikely(is_hugetlb_entry_migration(pte))) {
6298a8bda28dSNaoya Horiguchi 			swp_entry_t entry = pte_to_swp_entry(pte);
62996c287605SDavid Hildenbrand 			struct page *page = pfn_swap_entry_to_page(entry);
6300a8bda28dSNaoya Horiguchi 
63016c287605SDavid Hildenbrand 			if (!is_readable_migration_entry(entry)) {
6302a8bda28dSNaoya Horiguchi 				pte_t newpte;
6303a8bda28dSNaoya Horiguchi 
63046c287605SDavid Hildenbrand 				if (PageAnon(page))
63056c287605SDavid Hildenbrand 					entry = make_readable_exclusive_migration_entry(
63066c287605SDavid Hildenbrand 								swp_offset(entry));
63076c287605SDavid Hildenbrand 				else
63084dd845b5SAlistair Popple 					entry = make_readable_migration_entry(
63094dd845b5SAlistair Popple 								swp_offset(entry));
6310a8bda28dSNaoya Horiguchi 				newpte = swp_entry_to_pte(entry);
63115a90d5a1SPeter Xu 				if (uffd_wp)
63125a90d5a1SPeter Xu 					newpte = pte_swp_mkuffd_wp(newpte);
63135a90d5a1SPeter Xu 				else if (uffd_wp_resolve)
63145a90d5a1SPeter Xu 					newpte = pte_swp_clear_uffd_wp(newpte);
6315e5251fd4SPunit Agrawal 				set_huge_swap_pte_at(mm, address, ptep,
6316*60dfaad6SPeter Xu 						     newpte, psize);
6317a8bda28dSNaoya Horiguchi 				pages++;
6318a8bda28dSNaoya Horiguchi 			}
6319a8bda28dSNaoya Horiguchi 			spin_unlock(ptl);
6320a8bda28dSNaoya Horiguchi 			continue;
6321a8bda28dSNaoya Horiguchi 		}
6322*60dfaad6SPeter Xu 		if (unlikely(pte_marker_uffd_wp(pte))) {
6323*60dfaad6SPeter Xu 			/*
6324*60dfaad6SPeter Xu 			 * This is changing a non-present pte into a none pte,
6325*60dfaad6SPeter Xu 			 * no need for huge_ptep_modify_prot_start/commit().
6326*60dfaad6SPeter Xu 			 */
6327*60dfaad6SPeter Xu 			if (uffd_wp_resolve)
6328*60dfaad6SPeter Xu 				huge_pte_clear(mm, address, ptep, psize);
6329*60dfaad6SPeter Xu 		}
6330a8bda28dSNaoya Horiguchi 		if (!huge_pte_none(pte)) {
6331023bdd00SAneesh Kumar K.V 			pte_t old_pte;
633279c1c594SChristophe Leroy 			unsigned int shift = huge_page_shift(hstate_vma(vma));
6333023bdd00SAneesh Kumar K.V 
6334023bdd00SAneesh Kumar K.V 			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
633516785bd7SAnshuman Khandual 			pte = huge_pte_modify(old_pte, newprot);
633679c1c594SChristophe Leroy 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
63375a90d5a1SPeter Xu 			if (uffd_wp)
63385a90d5a1SPeter Xu 				pte = huge_pte_mkuffd_wp(huge_pte_wrprotect(pte));
63395a90d5a1SPeter Xu 			else if (uffd_wp_resolve)
63405a90d5a1SPeter Xu 				pte = huge_pte_clear_uffd_wp(pte);
6341023bdd00SAneesh Kumar K.V 			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
63427da4d641SPeter Zijlstra 			pages++;
6343*60dfaad6SPeter Xu 		} else {
6344*60dfaad6SPeter Xu 			/* None pte */
6345*60dfaad6SPeter Xu 			if (unlikely(uffd_wp))
6346*60dfaad6SPeter Xu 				/* Safe to modify directly (none->non-present). */
6347*60dfaad6SPeter Xu 				set_huge_pte_at(mm, address, ptep,
6348*60dfaad6SPeter Xu 						make_pte_marker(PTE_MARKER_UFFD_WP));
63498f860591SZhang, Yanmin 		}
6350cb900f41SKirill A. Shutemov 		spin_unlock(ptl);
63518f860591SZhang, Yanmin 	}
6352d833352aSMel Gorman 	/*
6353c8c06efaSDavidlohr Bueso 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
6354d833352aSMel Gorman 	 * may have cleared our pud entry and done put_page on the page table:
6355c8c06efaSDavidlohr Bueso 	 * once we release i_mmap_rwsem, another task can do the final put_page
6356dff11abeSMike Kravetz 	 * and that page table be reused and filled with junk.  If we actually
6357dff11abeSMike Kravetz 	 * did unshare a page of pmds, flush the range corresponding to the pud.
6358d833352aSMel Gorman 	 */
6359dff11abeSMike Kravetz 	if (shared_pmd)
6360ac46d4f3SJérôme Glisse 		flush_hugetlb_tlb_range(vma, range.start, range.end);
6361dff11abeSMike Kravetz 	else
63625491ae7bSAneesh Kumar K.V 		flush_hugetlb_tlb_range(vma, start, end);
63630f10851eSJérôme Glisse 	/*
63640f10851eSJérôme Glisse 	 * No need to call mmu_notifier_invalidate_range(): we are downgrading
63650f10851eSJérôme Glisse 	 * page table protection, not changing it to point to a new page.
63660f10851eSJérôme Glisse 	 *
6367ad56b738SMike Rapoport 	 * See Documentation/vm/mmu_notifier.rst
63680f10851eSJérôme Glisse 	 */
636983cde9e8SDavidlohr Bueso 	i_mmap_unlock_write(vma->vm_file->f_mapping);
6370ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
63717da4d641SPeter Zijlstra 
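	/*
	 * The return value is in base pages (huge PTEs changed, shifted by
	 * the hstate order); e.g. with 2 MB huge pages and 4 KB base pages
	 * (order 9), each modified entry contributes 512 to the count
	 * (illustrative geometry).
	 */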
63727da4d641SPeter Zijlstra 	return pages << h->order;
63738f860591SZhang, Yanmin }
63748f860591SZhang, Yanmin 
637533b8f84aSMike Kravetz /* Return true if reservation was successful, false otherwise.  */
637633b8f84aSMike Kravetz bool hugetlb_reserve_pages(struct inode *inode,
6377a1e78772SMel Gorman 					long from, long to,
63785a6fe125SMel Gorman 					struct vm_area_struct *vma,
6379ca16d140SKOSAKI Motohiro 					vm_flags_t vm_flags)
6380e4e574b7SAdam Litke {
638133b8f84aSMike Kravetz 	long chg, add = -1;
6382a5516438SAndi Kleen 	struct hstate *h = hstate_inode(inode);
638390481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_inode(inode);
63849119a41eSJoonsoo Kim 	struct resv_map *resv_map;
6385075a61d0SMina Almasry 	struct hugetlb_cgroup *h_cg = NULL;
63860db9d74eSMina Almasry 	long gbl_reserve, regions_needed = 0;
6387e4e574b7SAdam Litke 
638863489f8eSMike Kravetz 	/* This should never happen */
638963489f8eSMike Kravetz 	if (from > to) {
639063489f8eSMike Kravetz 		VM_WARN(1, "%s called with a negative range\n", __func__);
639133b8f84aSMike Kravetz 		return false;
639263489f8eSMike Kravetz 	}
639363489f8eSMike Kravetz 
6394a1e78772SMel Gorman 	/*
639517c9d12eSMel Gorman 	 * Only apply hugepage reservation if asked. At fault time, an
639617c9d12eSMel Gorman 	 * attempt will be made for VM_NORESERVE to allocate a page
639790481622SDavid Gibson 	 * without using reserves
639817c9d12eSMel Gorman 	 */
6399ca16d140SKOSAKI Motohiro 	if (vm_flags & VM_NORESERVE)
640033b8f84aSMike Kravetz 		return true;
640117c9d12eSMel Gorman 
640217c9d12eSMel Gorman 	/*
6403a1e78772SMel Gorman 	 * Shared mappings base their reservation on the number of pages that
6404a1e78772SMel Gorman 	 * are already allocated on behalf of the file. Private mappings need
6405a1e78772SMel Gorman 	 * to reserve the full area even if read-only as mprotect() may be
6406a1e78772SMel Gorman 	 * called to make the mapping read-write. Assume !vma is a shm mapping
6407a1e78772SMel Gorman 	 */
64089119a41eSJoonsoo Kim 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
6409f27a5136SMike Kravetz 		/*
6410f27a5136SMike Kravetz 		 * resv_map can not be NULL as hugetlb_reserve_pages is only
6411f27a5136SMike Kravetz 		 * called for inodes for which resv_maps were created (see
6412f27a5136SMike Kravetz 		 * hugetlbfs_get_inode).
6413f27a5136SMike Kravetz 		 */
64144e35f483SJoonsoo Kim 		resv_map = inode_resv_map(inode);
64159119a41eSJoonsoo Kim 
64160db9d74eSMina Almasry 		chg = region_chg(resv_map, from, to, &regions_needed);
64179119a41eSJoonsoo Kim 
64189119a41eSJoonsoo Kim 	} else {
6419e9fe92aeSMina Almasry 		/* Private mapping. */
64209119a41eSJoonsoo Kim 		resv_map = resv_map_alloc();
64215a6fe125SMel Gorman 		if (!resv_map)
642233b8f84aSMike Kravetz 			return false;
64235a6fe125SMel Gorman 
642417c9d12eSMel Gorman 		chg = to - from;
642517c9d12eSMel Gorman 
64265a6fe125SMel Gorman 		set_vma_resv_map(vma, resv_map);
64275a6fe125SMel Gorman 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
64285a6fe125SMel Gorman 	}
64295a6fe125SMel Gorman 
643033b8f84aSMike Kravetz 	if (chg < 0)
6431c50ac050SDave Hansen 		goto out_err;
643217c9d12eSMel Gorman 
643333b8f84aSMike Kravetz 	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
643433b8f84aSMike Kravetz 				chg * pages_per_huge_page(h), &h_cg) < 0)
6435075a61d0SMina Almasry 		goto out_err;
6436075a61d0SMina Almasry 
6437075a61d0SMina Almasry 	if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
6438075a61d0SMina Almasry 		/* For private mappings, the hugetlb_cgroup uncharge info hangs
6439075a61d0SMina Almasry 		 * off the resv_map.
6440075a61d0SMina Almasry 		 */
6441075a61d0SMina Almasry 		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6442075a61d0SMina Almasry 	}
6443075a61d0SMina Almasry 
64441c5ecae3SMike Kravetz 	/*
64451c5ecae3SMike Kravetz 	 * There must be enough pages in the subpool for the mapping. If
64461c5ecae3SMike Kravetz 	 * the subpool has a minimum size, there may be some global
64471c5ecae3SMike Kravetz 	 * reservations already in place (gbl_reserve).
64481c5ecae3SMike Kravetz 	 */
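	/*
	 * Illustration (assuming hugepage_subpool_get_pages() returns the
	 * number of pages that still need a global reservation): with
	 * chg == 10 and 4 pages already covered by the subpool's minimum,
	 * gbl_reserve would be 6 and only those 6 pages are reserved
	 * globally below.
	 */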
64491c5ecae3SMike Kravetz 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
645033b8f84aSMike Kravetz 	if (gbl_reserve < 0)
6451075a61d0SMina Almasry 		goto out_uncharge_cgroup;
645217c9d12eSMel Gorman 
645317c9d12eSMel Gorman 	/*
645417c9d12eSMel Gorman 	 * Check that enough hugepages are available for the reservation.
645590481622SDavid Gibson 	 * Hand the pages back to the subpool if they are not.
645617c9d12eSMel Gorman 	 */
645733b8f84aSMike Kravetz 	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
6458075a61d0SMina Almasry 		goto out_put_pages;
645917c9d12eSMel Gorman 
646017c9d12eSMel Gorman 	/*
646117c9d12eSMel Gorman 	 * Account for the reservations made. Shared mappings record regions
646217c9d12eSMel Gorman 	 * that have reservations as they are shared by multiple VMAs.
646317c9d12eSMel Gorman 	 * When the last VMA disappears, the region map says how much
646417c9d12eSMel Gorman 	 * the reservation was and the page cache tells how much of
646517c9d12eSMel Gorman 	 * the reservation was consumed. Private mappings are per-VMA and
646617c9d12eSMel Gorman 	 * only the consumed reservations are tracked. When the VMA
646717c9d12eSMel Gorman 	 * disappears, the original reservation is the VMA size and the
646817c9d12eSMel Gorman 	 * consumed reservations are stored in the map. Hence, nothing
646917c9d12eSMel Gorman 	 * else has to be done for private mappings here
647017c9d12eSMel Gorman 	 */
647133039678SMike Kravetz 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
6472075a61d0SMina Almasry 		add = region_add(resv_map, from, to, regions_needed, h, h_cg);
647333039678SMike Kravetz 
64740db9d74eSMina Almasry 		if (unlikely(add < 0)) {
64750db9d74eSMina Almasry 			hugetlb_acct_memory(h, -gbl_reserve);
6476075a61d0SMina Almasry 			goto out_put_pages;
64770db9d74eSMina Almasry 		} else if (unlikely(chg > add)) {
647833039678SMike Kravetz 			/*
647933039678SMike Kravetz 			 * pages in this range were added to the reserve
648033039678SMike Kravetz 			 * map between region_chg and region_add.  This
648133039678SMike Kravetz 			 * indicates a race with alloc_huge_page.  Adjust
648233039678SMike Kravetz 			 * the subpool and reserve counts modified above
648333039678SMike Kravetz 			 * based on the difference.
648433039678SMike Kravetz 			 */
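			/*
			 * Example (illustrative): if region_chg() returned
			 * chg == 10 but region_add() only added add == 8
			 * because two pages were reserved concurrently, the
			 * extra 2 huge pages are uncharged from the cgroup
			 * and handed back to the subpool below.
			 */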
648533039678SMike Kravetz 			long rsv_adjust;
648633039678SMike Kravetz 
6487d85aecf2SMiaohe Lin 			/*
6488d85aecf2SMiaohe Lin 			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
6489d85aecf2SMiaohe Lin 			 * reference to h_cg->css. See comment below for detail.
6490d85aecf2SMiaohe Lin 			 */
6491075a61d0SMina Almasry 			hugetlb_cgroup_uncharge_cgroup_rsvd(
6492075a61d0SMina Almasry 				hstate_index(h),
6493075a61d0SMina Almasry 				(chg - add) * pages_per_huge_page(h), h_cg);
6494075a61d0SMina Almasry 
649533039678SMike Kravetz 			rsv_adjust = hugepage_subpool_put_pages(spool,
649633039678SMike Kravetz 								chg - add);
649733039678SMike Kravetz 			hugetlb_acct_memory(h, -rsv_adjust);
6498d85aecf2SMiaohe Lin 		} else if (h_cg) {
6499d85aecf2SMiaohe Lin 			/*
6500d85aecf2SMiaohe Lin 			 * The file_regions will hold their own reference to
6501d85aecf2SMiaohe Lin 			 * h_cg->css. So we should release the reference held
6502d85aecf2SMiaohe Lin 			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
6503d85aecf2SMiaohe Lin 			 * done.
6504d85aecf2SMiaohe Lin 			 */
6505d85aecf2SMiaohe Lin 			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
650633039678SMike Kravetz 		}
650733039678SMike Kravetz 	}
650833b8f84aSMike Kravetz 	return true;
650933b8f84aSMike Kravetz 
6510075a61d0SMina Almasry out_put_pages:
6511075a61d0SMina Almasry 	/* put back original number of pages, chg */
6512075a61d0SMina Almasry 	(void)hugepage_subpool_put_pages(spool, chg);
6513075a61d0SMina Almasry out_uncharge_cgroup:
6514075a61d0SMina Almasry 	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
6515075a61d0SMina Almasry 					    chg * pages_per_huge_page(h), h_cg);
6516c50ac050SDave Hansen out_err:
65175e911373SMike Kravetz 	if (!vma || vma->vm_flags & VM_MAYSHARE)
65180db9d74eSMina Almasry 		/* Only call region_abort if the region_chg succeeded but the
65190db9d74eSMina Almasry 		 * region_add failed or didn't run.
65200db9d74eSMina Almasry 		 */
65210db9d74eSMina Almasry 		if (chg >= 0 && add < 0)
65220db9d74eSMina Almasry 			region_abort(resv_map, from, to, regions_needed);
6523f031dd27SJoonsoo Kim 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
6524f031dd27SJoonsoo Kim 		kref_put(&resv_map->refs, resv_map_release);
652533b8f84aSMike Kravetz 	return false;
6526a43a8c39SChen, Kenneth W }
6527a43a8c39SChen, Kenneth W 
6528b5cec28dSMike Kravetz long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
6529b5cec28dSMike Kravetz 								long freed)
6530a43a8c39SChen, Kenneth W {
6531a5516438SAndi Kleen 	struct hstate *h = hstate_inode(inode);
65324e35f483SJoonsoo Kim 	struct resv_map *resv_map = inode_resv_map(inode);
65339119a41eSJoonsoo Kim 	long chg = 0;
653490481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_inode(inode);
65351c5ecae3SMike Kravetz 	long gbl_reserve;
653645c682a6SKen Chen 
6537f27a5136SMike Kravetz 	/*
6538f27a5136SMike Kravetz 	 * Since this routine can be called in the evict inode path for all
6539f27a5136SMike Kravetz 	 * hugetlbfs inodes, resv_map could be NULL.
6540f27a5136SMike Kravetz 	 */
6541b5cec28dSMike Kravetz 	if (resv_map) {
6542b5cec28dSMike Kravetz 		chg = region_del(resv_map, start, end);
6543b5cec28dSMike Kravetz 		/*
6544b5cec28dSMike Kravetz 		 * region_del() can fail in the rare case where a region
6545b5cec28dSMike Kravetz 		 * must be split and another region descriptor can not be
6546b5cec28dSMike Kravetz 		 * allocated.  If end == LONG_MAX, it will not fail.
6547b5cec28dSMike Kravetz 		 */
6548b5cec28dSMike Kravetz 		if (chg < 0)
6549b5cec28dSMike Kravetz 			return chg;
6550b5cec28dSMike Kravetz 	}
6551b5cec28dSMike Kravetz 
655245c682a6SKen Chen 	spin_lock(&inode->i_lock);
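	/*
	 * i_blocks is kept in 512-byte units; e.g. freeing one 2 MB huge page
	 * drops it by 4096 (assuming blocks_per_huge_page() is the huge page
	 * size divided by 512).
	 */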
6553e4c6f8beSEric Sandeen 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
655445c682a6SKen Chen 	spin_unlock(&inode->i_lock);
655545c682a6SKen Chen 
65561c5ecae3SMike Kravetz 	/*
65571c5ecae3SMike Kravetz 	 * If the subpool has a minimum size, the number of global
65581c5ecae3SMike Kravetz 	 * reservations to be released may be adjusted.
6559dddf31a4SMiaohe Lin 	 *
6560dddf31a4SMiaohe Lin 	 * Note that !resv_map implies freed == 0. So (chg - freed)
6561dddf31a4SMiaohe Lin 	 * won't go negative.
65621c5ecae3SMike Kravetz 	 */
65631c5ecae3SMike Kravetz 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
65641c5ecae3SMike Kravetz 	hugetlb_acct_memory(h, -gbl_reserve);
6565b5cec28dSMike Kravetz 
6566b5cec28dSMike Kravetz 	return 0;
6567a43a8c39SChen, Kenneth W }
656893f70f90SNaoya Horiguchi 
65693212b535SSteve Capper #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
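/*
 * Illustrative example (assuming x86-64 with 1 GB PUD_SIZE and 2 MB huge
 * pages): if @vma maps a hugetlbfs file at 0x80000000 and @svma maps the
 * same file (same vm_pgoff) at 0x40000000, then for addr == 0x80200000 the
 * computed saddr is 0x40200000; both have the same pmd_index, so one PMD
 * page can back both mappings, provided the flags match and the PUD-sized
 * region [sbase, s_end) lies inside @svma.
 */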
65703212b535SSteve Capper static unsigned long page_table_shareable(struct vm_area_struct *svma,
65713212b535SSteve Capper 				struct vm_area_struct *vma,
65723212b535SSteve Capper 				unsigned long addr, pgoff_t idx)
65733212b535SSteve Capper {
65743212b535SSteve Capper 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
65753212b535SSteve Capper 				svma->vm_start;
65763212b535SSteve Capper 	unsigned long sbase = saddr & PUD_MASK;
65773212b535SSteve Capper 	unsigned long s_end = sbase + PUD_SIZE;
65783212b535SSteve Capper 
65793212b535SSteve Capper 	/* Allow segments to share if only one is marked locked */
6580de60f5f1SEric B Munson 	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
6581de60f5f1SEric B Munson 	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
65823212b535SSteve Capper 
65833212b535SSteve Capper 	/*
65843212b535SSteve Capper 	 * match the virtual addresses, permission and the alignment of the
65853212b535SSteve Capper 	 * page table page.
65863212b535SSteve Capper 	 */
65873212b535SSteve Capper 	if (pmd_index(addr) != pmd_index(saddr) ||
65883212b535SSteve Capper 	    vm_flags != svm_flags ||
658907e51edfSMiaohe Lin 	    !range_in_vma(svma, sbase, s_end))
65903212b535SSteve Capper 		return 0;
65913212b535SSteve Capper 
65923212b535SSteve Capper 	return saddr;
65933212b535SSteve Capper }
65943212b535SSteve Capper 
659531aafb45SNicholas Krause static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
65963212b535SSteve Capper {
65973212b535SSteve Capper 	unsigned long base = addr & PUD_MASK;
65983212b535SSteve Capper 	unsigned long end = base + PUD_SIZE;
65993212b535SSteve Capper 
66003212b535SSteve Capper 	/*
66013212b535SSteve Capper 	 * check on proper vm_flags and page table alignment
66023212b535SSteve Capper 	 */
6603017b1660SMike Kravetz 	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
660431aafb45SNicholas Krause 		return true;
660531aafb45SNicholas Krause 	return false;
66063212b535SSteve Capper }
66073212b535SSteve Capper 
6608c1991e07SPeter Xu bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6609c1991e07SPeter Xu {
6610c1991e07SPeter Xu #ifdef CONFIG_USERFAULTFD
6611c1991e07SPeter Xu 	if (uffd_disable_huge_pmd_share(vma))
6612c1991e07SPeter Xu 		return false;
6613c1991e07SPeter Xu #endif
6614c1991e07SPeter Xu 	return vma_shareable(vma, addr);
6615c1991e07SPeter Xu }
6616c1991e07SPeter Xu 
66173212b535SSteve Capper /*
6618017b1660SMike Kravetz  * Determine if start,end range within vma could be mapped by shared pmd.
6619017b1660SMike Kravetz  * If yes, adjust start and end to cover range associated with possible
6620017b1660SMike Kravetz  * shared pmd mappings.
6621017b1660SMike Kravetz  */
6622017b1660SMike Kravetz void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6623017b1660SMike Kravetz 				unsigned long *start, unsigned long *end)
6624017b1660SMike Kravetz {
6625a1ba9da8SLi Xinhai 	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
6626a1ba9da8SLi Xinhai 		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6627017b1660SMike Kravetz 
6628a1ba9da8SLi Xinhai 	/*
6629f0953a1bSIngo Molnar 	 * vma needs to span at least one aligned PUD size, and the range
6630f0953a1bSIngo Molnar 	 * must be at least partially within it.
6631a1ba9da8SLi Xinhai 	 */
6632a1ba9da8SLi Xinhai 	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
6633a1ba9da8SLi Xinhai 		(*end <= v_start) || (*start >= v_end))
6634017b1660SMike Kravetz 		return;
6635017b1660SMike Kravetz 
663675802ca6SPeter Xu 	/* Extend the range to be PUD aligned for a worst case scenario */
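	/*
	 * For example (assuming 1 GB PUD_SIZE): a vma spanning [1 GB, 3 GB)
	 * with *start == 0x48000000 and *end == 0x4a000000 is widened to
	 * [0x40000000, 0x80000000) so any PMD page shared within that PUD
	 * is covered.
	 */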
6637a1ba9da8SLi Xinhai 	if (*start > v_start)
6638a1ba9da8SLi Xinhai 		*start = ALIGN_DOWN(*start, PUD_SIZE);
6639017b1660SMike Kravetz 
6640a1ba9da8SLi Xinhai 	if (*end < v_end)
6641a1ba9da8SLi Xinhai 		*end = ALIGN(*end, PUD_SIZE);
6642017b1660SMike Kravetz }
6643017b1660SMike Kravetz 
6644017b1660SMike Kravetz /*
66453212b535SSteve Capper  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
66463212b535SSteve Capper  * and returns the corresponding pte. While this is not necessary for the
66473212b535SSteve Capper  * !shared pmd case because we can allocate the pmd later as well, it makes the
6648c0d0381aSMike Kravetz  * code much cleaner.
6649c0d0381aSMike Kravetz  *
66500bf7b64eSMike Kravetz  * This routine must be called with i_mmap_rwsem held in at least read mode if
66510bf7b64eSMike Kravetz  * sharing is possible.  For hugetlbfs, this prevents removal of any page
66520bf7b64eSMike Kravetz  * table entries associated with the address space.  This is important as we
66530bf7b64eSMike Kravetz  * are setting up sharing based on existing page table entries (mappings).
66543212b535SSteve Capper  */
6655aec44e0fSPeter Xu pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6656aec44e0fSPeter Xu 		      unsigned long addr, pud_t *pud)
66573212b535SSteve Capper {
66583212b535SSteve Capper 	struct address_space *mapping = vma->vm_file->f_mapping;
66593212b535SSteve Capper 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
66603212b535SSteve Capper 			vma->vm_pgoff;
66613212b535SSteve Capper 	struct vm_area_struct *svma;
66623212b535SSteve Capper 	unsigned long saddr;
66633212b535SSteve Capper 	pte_t *spte = NULL;
66643212b535SSteve Capper 	pte_t *pte;
6665cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
66663212b535SSteve Capper 
66670bf7b64eSMike Kravetz 	i_mmap_assert_locked(mapping);
66683212b535SSteve Capper 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
66693212b535SSteve Capper 		if (svma == vma)
66703212b535SSteve Capper 			continue;
66713212b535SSteve Capper 
66723212b535SSteve Capper 		saddr = page_table_shareable(svma, vma, addr, idx);
66733212b535SSteve Capper 		if (saddr) {
66747868a208SPunit Agrawal 			spte = huge_pte_offset(svma->vm_mm, saddr,
66757868a208SPunit Agrawal 					       vma_mmu_pagesize(svma));
66763212b535SSteve Capper 			if (spte) {
66773212b535SSteve Capper 				get_page(virt_to_page(spte));
66783212b535SSteve Capper 				break;
66793212b535SSteve Capper 			}
66803212b535SSteve Capper 		}
66813212b535SSteve Capper 	}
66823212b535SSteve Capper 
66833212b535SSteve Capper 	if (!spte)
66843212b535SSteve Capper 		goto out;
66853212b535SSteve Capper 
66868bea8052SAneesh Kumar K.V 	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
6687dc6c9a35SKirill A. Shutemov 	if (pud_none(*pud)) {
66883212b535SSteve Capper 		pud_populate(mm, pud,
66893212b535SSteve Capper 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
6690c17b1f42SKirill A. Shutemov 		mm_inc_nr_pmds(mm);
6691dc6c9a35SKirill A. Shutemov 	} else {
66923212b535SSteve Capper 		put_page(virt_to_page(spte));
6693dc6c9a35SKirill A. Shutemov 	}
6694cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
66953212b535SSteve Capper out:
66963212b535SSteve Capper 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
66973212b535SSteve Capper 	return pte;
66983212b535SSteve Capper }
66993212b535SSteve Capper 
67003212b535SSteve Capper /*
67013212b535SSteve Capper  * unmap huge page backed by shared pte.
67023212b535SSteve Capper  *
67033212b535SSteve Capper  * Hugetlb pte page is ref counted at the time of mapping.  If the pte is
67043212b535SSteve Capper  * shared (indicated by page_count > 1), unmap is achieved by clearing pud and
67053212b535SSteve Capper  * decrementing the ref count. If count == 1, the pte page is not shared.
67063212b535SSteve Capper  *
6707c0d0381aSMike Kravetz  * Called with page table lock held and i_mmap_rwsem held in write mode.
67083212b535SSteve Capper  *
67093212b535SSteve Capper  * returns: 1 successfully unmapped a shared pte page
67103212b535SSteve Capper  *	    0 the underlying pte page is not shared, or it is the last user
67113212b535SSteve Capper  */
671234ae204fSMike Kravetz int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
671334ae204fSMike Kravetz 					unsigned long *addr, pte_t *ptep)
67143212b535SSteve Capper {
67153212b535SSteve Capper 	pgd_t *pgd = pgd_offset(mm, *addr);
6716c2febafcSKirill A. Shutemov 	p4d_t *p4d = p4d_offset(pgd, *addr);
6717c2febafcSKirill A. Shutemov 	pud_t *pud = pud_offset(p4d, *addr);
67183212b535SSteve Capper 
671934ae204fSMike Kravetz 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
67203212b535SSteve Capper 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
67213212b535SSteve Capper 	if (page_count(virt_to_page(ptep)) == 1)
67223212b535SSteve Capper 		return 0;
67233212b535SSteve Capper 
67243212b535SSteve Capper 	pud_clear(pud);
67253212b535SSteve Capper 	put_page(virt_to_page(ptep));
6726dc6c9a35SKirill A. Shutemov 	mm_dec_nr_pmds(mm);
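	/*
	 * Rewind *addr so that a caller iterating with "addr +=
	 * huge_page_size()" continues from the end of the unshared range;
	 * e.g. with 2 MB huge pages and 512 PTEs per page table (illustrative
	 * x86-64 geometry), ALIGN(*addr, 1 GB) - 2 MB leaves *addr one huge
	 * page before the next PUD boundary.
	 */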
67273212b535SSteve Capper 	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
67283212b535SSteve Capper 	return 1;
67293212b535SSteve Capper }
6730c1991e07SPeter Xu 
67319e5fc74cSSteve Capper #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
6732aec44e0fSPeter Xu pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6733aec44e0fSPeter Xu 		      unsigned long addr, pud_t *pud)
67349e5fc74cSSteve Capper {
67359e5fc74cSSteve Capper 	return NULL;
67369e5fc74cSSteve Capper }
6737e81f2d22SZhang Zhen 
673834ae204fSMike Kravetz int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
673934ae204fSMike Kravetz 				unsigned long *addr, pte_t *ptep)
6740e81f2d22SZhang Zhen {
6741e81f2d22SZhang Zhen 	return 0;
6742e81f2d22SZhang Zhen }
6743017b1660SMike Kravetz 
6744017b1660SMike Kravetz void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6745017b1660SMike Kravetz 				unsigned long *start, unsigned long *end)
6746017b1660SMike Kravetz {
6747017b1660SMike Kravetz }
6748c1991e07SPeter Xu 
6749c1991e07SPeter Xu bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6750c1991e07SPeter Xu {
6751c1991e07SPeter Xu 	return false;
6752c1991e07SPeter Xu }
67533212b535SSteve Capper #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
67543212b535SSteve Capper 
67559e5fc74cSSteve Capper #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
6756aec44e0fSPeter Xu pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
67579e5fc74cSSteve Capper 			unsigned long addr, unsigned long sz)
67589e5fc74cSSteve Capper {
67599e5fc74cSSteve Capper 	pgd_t *pgd;
6760c2febafcSKirill A. Shutemov 	p4d_t *p4d;
67619e5fc74cSSteve Capper 	pud_t *pud;
67629e5fc74cSSteve Capper 	pte_t *pte = NULL;
67639e5fc74cSSteve Capper 
67649e5fc74cSSteve Capper 	pgd = pgd_offset(mm, addr);
6765f4f0a3d8SKirill A. Shutemov 	p4d = p4d_alloc(mm, pgd, addr);
6766f4f0a3d8SKirill A. Shutemov 	if (!p4d)
6767f4f0a3d8SKirill A. Shutemov 		return NULL;
6768c2febafcSKirill A. Shutemov 	pud = pud_alloc(mm, p4d, addr);
67699e5fc74cSSteve Capper 	if (pud) {
67709e5fc74cSSteve Capper 		if (sz == PUD_SIZE) {
67719e5fc74cSSteve Capper 			pte = (pte_t *)pud;
67729e5fc74cSSteve Capper 		} else {
67739e5fc74cSSteve Capper 			BUG_ON(sz != PMD_SIZE);
6774c1991e07SPeter Xu 			if (want_pmd_share(vma, addr) && pud_none(*pud))
6775aec44e0fSPeter Xu 				pte = huge_pmd_share(mm, vma, addr, pud);
67769e5fc74cSSteve Capper 			else
67779e5fc74cSSteve Capper 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
67789e5fc74cSSteve Capper 		}
67799e5fc74cSSteve Capper 	}
67804e666314SMichal Hocko 	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
67819e5fc74cSSteve Capper 
67829e5fc74cSSteve Capper 	return pte;
67839e5fc74cSSteve Capper }
67849e5fc74cSSteve Capper 
67859b19df29SPunit Agrawal /*
67869b19df29SPunit Agrawal  * huge_pte_offset() - Walk the page table to resolve the hugepage
67879b19df29SPunit Agrawal  * entry at address @addr
67889b19df29SPunit Agrawal  *
67898ac0b81aSLi Xinhai  * Return: Pointer to page table entry (PUD or PMD) for
67908ac0b81aSLi Xinhai  * address @addr, or NULL if a !p*d_present() entry is encountered and the
67919b19df29SPunit Agrawal  * size @sz doesn't match the hugepage size at this level of the page
67929b19df29SPunit Agrawal  * table.
67939b19df29SPunit Agrawal  */
67947868a208SPunit Agrawal pte_t *huge_pte_offset(struct mm_struct *mm,
67957868a208SPunit Agrawal 		       unsigned long addr, unsigned long sz)
67969e5fc74cSSteve Capper {
67979e5fc74cSSteve Capper 	pgd_t *pgd;
6798c2febafcSKirill A. Shutemov 	p4d_t *p4d;
67998ac0b81aSLi Xinhai 	pud_t *pud;
68008ac0b81aSLi Xinhai 	pmd_t *pmd;
68019e5fc74cSSteve Capper 
68029e5fc74cSSteve Capper 	pgd = pgd_offset(mm, addr);
6803c2febafcSKirill A. Shutemov 	if (!pgd_present(*pgd))
6804c2febafcSKirill A. Shutemov 		return NULL;
6805c2febafcSKirill A. Shutemov 	p4d = p4d_offset(pgd, addr);
6806c2febafcSKirill A. Shutemov 	if (!p4d_present(*p4d))
6807c2febafcSKirill A. Shutemov 		return NULL;
68089b19df29SPunit Agrawal 
6809c2febafcSKirill A. Shutemov 	pud = pud_offset(p4d, addr);
68108ac0b81aSLi Xinhai 	if (sz == PUD_SIZE)
68118ac0b81aSLi Xinhai 		/* must be pud huge, non-present or none */
68129e5fc74cSSteve Capper 		return (pte_t *)pud;
68138ac0b81aSLi Xinhai 	if (!pud_present(*pud))
68148ac0b81aSLi Xinhai 		return NULL;
68158ac0b81aSLi Xinhai 	/* must have a valid entry and size to go further */
68169b19df29SPunit Agrawal 
68179e5fc74cSSteve Capper 	pmd = pmd_offset(pud, addr);
68188ac0b81aSLi Xinhai 	/* must be pmd huge, non-present or none */
68199e5fc74cSSteve Capper 	return (pte_t *)pmd;
68209e5fc74cSSteve Capper }
68219e5fc74cSSteve Capper 
682261f77edaSNaoya Horiguchi #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
682361f77edaSNaoya Horiguchi 
682461f77edaSNaoya Horiguchi /*
682561f77edaSNaoya Horiguchi  * These functions are overridable if your architecture needs its own
682661f77edaSNaoya Horiguchi  * behavior.
682761f77edaSNaoya Horiguchi  */
682861f77edaSNaoya Horiguchi struct page * __weak
682961f77edaSNaoya Horiguchi follow_huge_addr(struct mm_struct *mm, unsigned long address,
683061f77edaSNaoya Horiguchi 			      int write)
683161f77edaSNaoya Horiguchi {
683261f77edaSNaoya Horiguchi 	return ERR_PTR(-EINVAL);
683361f77edaSNaoya Horiguchi }
683461f77edaSNaoya Horiguchi 
683561f77edaSNaoya Horiguchi struct page * __weak
68364dc71451SAneesh Kumar K.V follow_huge_pd(struct vm_area_struct *vma,
68374dc71451SAneesh Kumar K.V 	       unsigned long address, hugepd_t hpd, int flags, int pdshift)
68384dc71451SAneesh Kumar K.V {
68394dc71451SAneesh Kumar K.V 	WARN(1, "hugepd follow called with no support for hugepage directory format\n");
68404dc71451SAneesh Kumar K.V 	return NULL;
68414dc71451SAneesh Kumar K.V }
68424dc71451SAneesh Kumar K.V 
68434dc71451SAneesh Kumar K.V struct page * __weak
68449e5fc74cSSteve Capper follow_huge_pmd(struct mm_struct *mm, unsigned long address,
6845e66f17ffSNaoya Horiguchi 		pmd_t *pmd, int flags)
68469e5fc74cSSteve Capper {
6847e66f17ffSNaoya Horiguchi 	struct page *page = NULL;
6848e66f17ffSNaoya Horiguchi 	spinlock_t *ptl;
6849c9d398faSNaoya Horiguchi 	pte_t pte;
68503faa52c0SJohn Hubbard 
68518909691bSDavid Hildenbrand 	/*
68528909691bSDavid Hildenbrand 	 * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
68538909691bSDavid Hildenbrand 	 * follow_hugetlb_page().
68548909691bSDavid Hildenbrand 	 */
68558909691bSDavid Hildenbrand 	if (WARN_ON_ONCE(flags & FOLL_PIN))
68563faa52c0SJohn Hubbard 		return NULL;
68573faa52c0SJohn Hubbard 
6858e66f17ffSNaoya Horiguchi retry:
6859e66f17ffSNaoya Horiguchi 	ptl = pmd_lockptr(mm, pmd);
6860e66f17ffSNaoya Horiguchi 	spin_lock(ptl);
6861e66f17ffSNaoya Horiguchi 	/*
6862e66f17ffSNaoya Horiguchi 	 * make sure that the address range covered by this pmd is not
6863e66f17ffSNaoya Horiguchi 	 * unmapped by other threads.
6864e66f17ffSNaoya Horiguchi 	 */
6865e66f17ffSNaoya Horiguchi 	if (!pmd_huge(*pmd))
6866e66f17ffSNaoya Horiguchi 		goto out;
6867c9d398faSNaoya Horiguchi 	pte = huge_ptep_get((pte_t *)pmd);
6868c9d398faSNaoya Horiguchi 	if (pte_present(pte)) {
686997534127SGerald Schaefer 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
68703faa52c0SJohn Hubbard 		/*
68713faa52c0SJohn Hubbard 		 * try_grab_page() should always succeed here, because: a) we
68723faa52c0SJohn Hubbard 		 * hold the pmd (ptl) lock, and b) we've just checked that the
68733faa52c0SJohn Hubbard 		 * huge pmd (head) page is present in the page tables. The ptl
68743faa52c0SJohn Hubbard 		 * prevents the head page and tail pages from being rearranged
68753faa52c0SJohn Hubbard 		 * in any way. So this page must be available at this point,
68763faa52c0SJohn Hubbard 		 * unless the page refcount overflowed:
68773faa52c0SJohn Hubbard 		 */
68783faa52c0SJohn Hubbard 		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
68793faa52c0SJohn Hubbard 			page = NULL;
68803faa52c0SJohn Hubbard 			goto out;
68813faa52c0SJohn Hubbard 		}
6882e66f17ffSNaoya Horiguchi 	} else {
6883c9d398faSNaoya Horiguchi 		if (is_hugetlb_entry_migration(pte)) {
6884e66f17ffSNaoya Horiguchi 			spin_unlock(ptl);
6885e66f17ffSNaoya Horiguchi 			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
6886e66f17ffSNaoya Horiguchi 			goto retry;
6887e66f17ffSNaoya Horiguchi 		}
6888e66f17ffSNaoya Horiguchi 		/*
6889e66f17ffSNaoya Horiguchi 		 * hwpoisoned entry is treated as no_page_table in
6890e66f17ffSNaoya Horiguchi 		 * follow_page_mask().
6891e66f17ffSNaoya Horiguchi 		 */
6892e66f17ffSNaoya Horiguchi 	}
6893e66f17ffSNaoya Horiguchi out:
6894e66f17ffSNaoya Horiguchi 	spin_unlock(ptl);
68959e5fc74cSSteve Capper 	return page;
68969e5fc74cSSteve Capper }
68979e5fc74cSSteve Capper 
689861f77edaSNaoya Horiguchi struct page * __weak
68999e5fc74cSSteve Capper follow_huge_pud(struct mm_struct *mm, unsigned long address,
6900e66f17ffSNaoya Horiguchi 		pud_t *pud, int flags)
69019e5fc74cSSteve Capper {
69023faa52c0SJohn Hubbard 	if (flags & (FOLL_GET | FOLL_PIN))
6903e66f17ffSNaoya Horiguchi 		return NULL;
69049e5fc74cSSteve Capper 
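	/*
	 * Illustration: for a 1 GB PUD mapping, an address 2 MB into the huge
	 * page yields subpage index (0x200000 >> PAGE_SHIFT) == 512 relative
	 * to the head page (assuming 4 KB base pages).
	 */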
6905e66f17ffSNaoya Horiguchi 	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
69069e5fc74cSSteve Capper }
69079e5fc74cSSteve Capper 
6908faaa5b62SAnshuman Khandual struct page * __weak
6909faaa5b62SAnshuman Khandual follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
6910faaa5b62SAnshuman Khandual {
69113faa52c0SJohn Hubbard 	if (flags & (FOLL_GET | FOLL_PIN))
6912faaa5b62SAnshuman Khandual 		return NULL;
6913faaa5b62SAnshuman Khandual 
6914faaa5b62SAnshuman Khandual 	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
6915faaa5b62SAnshuman Khandual }
6916faaa5b62SAnshuman Khandual 
691731caf665SNaoya Horiguchi bool isolate_huge_page(struct page *page, struct list_head *list)
691831caf665SNaoya Horiguchi {
6919bcc54222SNaoya Horiguchi 	bool ret = true;
6920bcc54222SNaoya Horiguchi 
6921db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
69228f251a3dSMike Kravetz 	if (!PageHeadHuge(page) ||
69238f251a3dSMike Kravetz 	    !HPageMigratable(page) ||
69240eb2df2bSMuchun Song 	    !get_page_unless_zero(page)) {
6925bcc54222SNaoya Horiguchi 		ret = false;
6926bcc54222SNaoya Horiguchi 		goto unlock;
6927bcc54222SNaoya Horiguchi 	}
69288f251a3dSMike Kravetz 	ClearHPageMigratable(page);
692931caf665SNaoya Horiguchi 	list_move_tail(&page->lru, list);
6930bcc54222SNaoya Horiguchi unlock:
6931db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
6932bcc54222SNaoya Horiguchi 	return ret;
693331caf665SNaoya Horiguchi }
693431caf665SNaoya Horiguchi 
693525182f05SNaoya Horiguchi int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
693625182f05SNaoya Horiguchi {
693725182f05SNaoya Horiguchi 	int ret = 0;
693825182f05SNaoya Horiguchi 
693925182f05SNaoya Horiguchi 	*hugetlb = false;
694025182f05SNaoya Horiguchi 	spin_lock_irq(&hugetlb_lock);
694125182f05SNaoya Horiguchi 	if (PageHeadHuge(page)) {
694225182f05SNaoya Horiguchi 		*hugetlb = true;
6943b283d983SNaoya Horiguchi 		if (HPageFreed(page))
6944b283d983SNaoya Horiguchi 			ret = 0;
6945b283d983SNaoya Horiguchi 		else if (HPageMigratable(page))
694625182f05SNaoya Horiguchi 			ret = get_page_unless_zero(page);
69470ed950d1SNaoya Horiguchi 		else
69480ed950d1SNaoya Horiguchi 			ret = -EBUSY;
694925182f05SNaoya Horiguchi 	}
695025182f05SNaoya Horiguchi 	spin_unlock_irq(&hugetlb_lock);
695125182f05SNaoya Horiguchi 	return ret;
695225182f05SNaoya Horiguchi }
695325182f05SNaoya Horiguchi 
6954405ce051SNaoya Horiguchi int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
6955405ce051SNaoya Horiguchi {
6956405ce051SNaoya Horiguchi 	int ret;
6957405ce051SNaoya Horiguchi 
6958405ce051SNaoya Horiguchi 	spin_lock_irq(&hugetlb_lock);
6959405ce051SNaoya Horiguchi 	ret = __get_huge_page_for_hwpoison(pfn, flags);
6960405ce051SNaoya Horiguchi 	spin_unlock_irq(&hugetlb_lock);
6961405ce051SNaoya Horiguchi 	return ret;
6962405ce051SNaoya Horiguchi }
6963405ce051SNaoya Horiguchi 
696431caf665SNaoya Horiguchi void putback_active_hugepage(struct page *page)
696531caf665SNaoya Horiguchi {
6966db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
69678f251a3dSMike Kravetz 	SetHPageMigratable(page);
696831caf665SNaoya Horiguchi 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
6969db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
697031caf665SNaoya Horiguchi 	put_page(page);
697131caf665SNaoya Horiguchi }
6972ab5ac90aSMichal Hocko 
6973ab5ac90aSMichal Hocko void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
6974ab5ac90aSMichal Hocko {
6975ab5ac90aSMichal Hocko 	struct hstate *h = page_hstate(oldpage);
6976ab5ac90aSMichal Hocko 
6977ab5ac90aSMichal Hocko 	hugetlb_cgroup_migrate(oldpage, newpage);
6978ab5ac90aSMichal Hocko 	set_page_owner_migrate_reason(newpage, reason);
6979ab5ac90aSMichal Hocko 
6980ab5ac90aSMichal Hocko 	/*
6981ab5ac90aSMichal Hocko 	 * Transfer the temporary state of the new huge page. This is the
6982ab5ac90aSMichal Hocko 	 * reverse of other transitions because the newpage is going to
6983ab5ac90aSMichal Hocko 	 * be final while the old one will be freed, so it takes over
6984ab5ac90aSMichal Hocko 	 * the temporary status.
6985ab5ac90aSMichal Hocko 	 *
6986ab5ac90aSMichal Hocko 	 * Also note that we have to transfer the per-node surplus state
6987ab5ac90aSMichal Hocko 	 * here as well otherwise the global surplus count will not match
6988ab5ac90aSMichal Hocko 	 * the per-node's.
6989ab5ac90aSMichal Hocko 	 */
69909157c311SMike Kravetz 	if (HPageTemporary(newpage)) {
6991ab5ac90aSMichal Hocko 		int old_nid = page_to_nid(oldpage);
6992ab5ac90aSMichal Hocko 		int new_nid = page_to_nid(newpage);
6993ab5ac90aSMichal Hocko 
69949157c311SMike Kravetz 		SetHPageTemporary(oldpage);
69959157c311SMike Kravetz 		ClearHPageTemporary(newpage);
6996ab5ac90aSMichal Hocko 
69975af1ab1dSMiaohe Lin 		/*
69985af1ab1dSMiaohe Lin 		 * There is no need to transfer the per-node surplus state
69995af1ab1dSMiaohe Lin 		 * when we do not cross the node.
70005af1ab1dSMiaohe Lin 		 */
70015af1ab1dSMiaohe Lin 		if (new_nid == old_nid)
70025af1ab1dSMiaohe Lin 			return;
7003db71ef79SMike Kravetz 		spin_lock_irq(&hugetlb_lock);
7004ab5ac90aSMichal Hocko 		if (h->surplus_huge_pages_node[old_nid]) {
7005ab5ac90aSMichal Hocko 			h->surplus_huge_pages_node[old_nid]--;
7006ab5ac90aSMichal Hocko 			h->surplus_huge_pages_node[new_nid]++;
7007ab5ac90aSMichal Hocko 		}
7008db71ef79SMike Kravetz 		spin_unlock_irq(&hugetlb_lock);
7009ab5ac90aSMichal Hocko 	}
7010ab5ac90aSMichal Hocko }
7011cf11e85fSRoman Gushchin 
70126dfeaff9SPeter Xu /*
70136dfeaff9SPeter Xu  * This function will unconditionally remove all the shared pmd pgtable entries
70146dfeaff9SPeter Xu  * within the specific vma for a hugetlbfs memory range.
70156dfeaff9SPeter Xu  */
70166dfeaff9SPeter Xu void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
70176dfeaff9SPeter Xu {
70186dfeaff9SPeter Xu 	struct hstate *h = hstate_vma(vma);
70196dfeaff9SPeter Xu 	unsigned long sz = huge_page_size(h);
70206dfeaff9SPeter Xu 	struct mm_struct *mm = vma->vm_mm;
70216dfeaff9SPeter Xu 	struct mmu_notifier_range range;
70226dfeaff9SPeter Xu 	unsigned long address, start, end;
70236dfeaff9SPeter Xu 	spinlock_t *ptl;
70246dfeaff9SPeter Xu 	pte_t *ptep;
70256dfeaff9SPeter Xu 
70266dfeaff9SPeter Xu 	if (!(vma->vm_flags & VM_MAYSHARE))
70276dfeaff9SPeter Xu 		return;
70286dfeaff9SPeter Xu 
70296dfeaff9SPeter Xu 	start = ALIGN(vma->vm_start, PUD_SIZE);
70306dfeaff9SPeter Xu 	end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
70316dfeaff9SPeter Xu 
70326dfeaff9SPeter Xu 	if (start >= end)
70336dfeaff9SPeter Xu 		return;
70346dfeaff9SPeter Xu 
70359c8bbfacSBaolin Wang 	flush_cache_range(vma, start, end);
70366dfeaff9SPeter Xu 	/*
70376dfeaff9SPeter Xu 	 * No need to call adjust_range_if_pmd_sharing_possible(), because
70386dfeaff9SPeter Xu 	 * we have already done the PUD_SIZE alignment.
70396dfeaff9SPeter Xu 	 */
70406dfeaff9SPeter Xu 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
70416dfeaff9SPeter Xu 				start, end);
70426dfeaff9SPeter Xu 	mmu_notifier_invalidate_range_start(&range);
70436dfeaff9SPeter Xu 	i_mmap_lock_write(vma->vm_file->f_mapping);
70446dfeaff9SPeter Xu 	for (address = start; address < end; address += PUD_SIZE) {
70456dfeaff9SPeter Xu 		unsigned long tmp = address;
70466dfeaff9SPeter Xu 
70476dfeaff9SPeter Xu 		ptep = huge_pte_offset(mm, address, sz);
70486dfeaff9SPeter Xu 		if (!ptep)
70496dfeaff9SPeter Xu 			continue;
70506dfeaff9SPeter Xu 		ptl = huge_pte_lock(h, mm, ptep);
70516dfeaff9SPeter Xu 		/* We don't want 'address' to be changed */
70526dfeaff9SPeter Xu 		huge_pmd_unshare(mm, vma, &tmp, ptep);
70536dfeaff9SPeter Xu 		spin_unlock(ptl);
70546dfeaff9SPeter Xu 	}
70556dfeaff9SPeter Xu 	flush_hugetlb_tlb_range(vma, start, end);
70566dfeaff9SPeter Xu 	i_mmap_unlock_write(vma->vm_file->f_mapping);
70576dfeaff9SPeter Xu 	/*
70586dfeaff9SPeter Xu 	 * No need to call mmu_notifier_invalidate_range(), see
70596dfeaff9SPeter Xu 	 * Documentation/vm/mmu_notifier.rst.
70606dfeaff9SPeter Xu 	 */
70616dfeaff9SPeter Xu 	mmu_notifier_invalidate_range_end(&range);
70626dfeaff9SPeter Xu }
70636dfeaff9SPeter Xu 
7064cf11e85fSRoman Gushchin #ifdef CONFIG_CMA
7065cf11e85fSRoman Gushchin static bool cma_reserve_called __initdata;
7066cf11e85fSRoman Gushchin 
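/*
 * Parse the "hugetlb_cma=" early parameter. Based on the parser below, the
 * accepted forms are (illustrative): a single total size such as
 * "hugetlb_cma=4G", or per-node "node:size" pairs such as
 * "hugetlb_cma=0:1G,2:2G", with memparse() size suffixes (K/M/G).
 */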
7067cf11e85fSRoman Gushchin static int __init cmdline_parse_hugetlb_cma(char *p)
7068cf11e85fSRoman Gushchin {
706938e719abSBaolin Wang 	int nid, count = 0;
707038e719abSBaolin Wang 	unsigned long tmp;
707138e719abSBaolin Wang 	char *s = p;
707238e719abSBaolin Wang 
707338e719abSBaolin Wang 	while (*s) {
707438e719abSBaolin Wang 		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
707538e719abSBaolin Wang 			break;
707638e719abSBaolin Wang 
707738e719abSBaolin Wang 		if (s[count] == ':') {
7078f9317f77SMike Kravetz 			if (tmp >= MAX_NUMNODES)
707938e719abSBaolin Wang 				break;
7080f9317f77SMike Kravetz 			nid = array_index_nospec(tmp, MAX_NUMNODES);
708138e719abSBaolin Wang 
708238e719abSBaolin Wang 			s += count + 1;
708338e719abSBaolin Wang 			tmp = memparse(s, &s);
708438e719abSBaolin Wang 			hugetlb_cma_size_in_node[nid] = tmp;
708538e719abSBaolin Wang 			hugetlb_cma_size += tmp;
708638e719abSBaolin Wang 
708738e719abSBaolin Wang 			/*
708838e719abSBaolin Wang 			 * Skip the separator if we have one, otherwise
708938e719abSBaolin Wang 			 * stop parsing.
709038e719abSBaolin Wang 			 */
709138e719abSBaolin Wang 			if (*s == ',')
709238e719abSBaolin Wang 				s++;
709338e719abSBaolin Wang 			else
709438e719abSBaolin Wang 				break;
709538e719abSBaolin Wang 		} else {
7096cf11e85fSRoman Gushchin 			hugetlb_cma_size = memparse(p, &p);
709738e719abSBaolin Wang 			break;
709838e719abSBaolin Wang 		}
709938e719abSBaolin Wang 	}
710038e719abSBaolin Wang 
7101cf11e85fSRoman Gushchin 	return 0;
7102cf11e85fSRoman Gushchin }
7103cf11e85fSRoman Gushchin 
7104cf11e85fSRoman Gushchin early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
7105cf11e85fSRoman Gushchin 
7106cf11e85fSRoman Gushchin void __init hugetlb_cma_reserve(int order)
7107cf11e85fSRoman Gushchin {
7108cf11e85fSRoman Gushchin 	unsigned long size, reserved, per_node;
710938e719abSBaolin Wang 	bool node_specific_cma_alloc = false;
7110cf11e85fSRoman Gushchin 	int nid;
7111cf11e85fSRoman Gushchin 
7112cf11e85fSRoman Gushchin 	cma_reserve_called = true;
7113cf11e85fSRoman Gushchin 
7114cf11e85fSRoman Gushchin 	if (!hugetlb_cma_size)
7115cf11e85fSRoman Gushchin 		return;
7116cf11e85fSRoman Gushchin 
711738e719abSBaolin Wang 	for (nid = 0; nid < MAX_NUMNODES; nid++) {
711838e719abSBaolin Wang 		if (hugetlb_cma_size_in_node[nid] == 0)
711938e719abSBaolin Wang 			continue;
712038e719abSBaolin Wang 
712130a51400SPeng Liu 		if (!node_online(nid)) {
712238e719abSBaolin Wang 			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
712338e719abSBaolin Wang 			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
712438e719abSBaolin Wang 			hugetlb_cma_size_in_node[nid] = 0;
712538e719abSBaolin Wang 			continue;
712638e719abSBaolin Wang 		}
712738e719abSBaolin Wang 
712838e719abSBaolin Wang 		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
712938e719abSBaolin Wang 			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
713038e719abSBaolin Wang 				nid, (PAGE_SIZE << order) / SZ_1M);
713138e719abSBaolin Wang 			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
713238e719abSBaolin Wang 			hugetlb_cma_size_in_node[nid] = 0;
713338e719abSBaolin Wang 		} else {
713438e719abSBaolin Wang 			node_specific_cma_alloc = true;
713538e719abSBaolin Wang 		}
713638e719abSBaolin Wang 	}
713738e719abSBaolin Wang 
713838e719abSBaolin Wang 	/* Validate the CMA size again in case some invalid nodes were specified. */
713938e719abSBaolin Wang 	if (!hugetlb_cma_size)
714038e719abSBaolin Wang 		return;
714138e719abSBaolin Wang 
7142cf11e85fSRoman Gushchin 	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
7143cf11e85fSRoman Gushchin 		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
7144cf11e85fSRoman Gushchin 			(PAGE_SIZE << order) / SZ_1M);
7145a01f4390SMike Kravetz 		hugetlb_cma_size = 0;
7146cf11e85fSRoman Gushchin 		return;
7147cf11e85fSRoman Gushchin 	}
7148cf11e85fSRoman Gushchin 
714938e719abSBaolin Wang 	if (!node_specific_cma_alloc) {
7150cf11e85fSRoman Gushchin 		/*
7151cf11e85fSRoman Gushchin 		 * If a 3 GB area is requested on a machine with 4 NUMA nodes,
7152cf11e85fSRoman Gushchin 		 * allocate 1 GB on the first three nodes and ignore the last one.
7153cf11e85fSRoman Gushchin 		 */
7154cf11e85fSRoman Gushchin 		per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
7155cf11e85fSRoman Gushchin 		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
7156cf11e85fSRoman Gushchin 			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
715738e719abSBaolin Wang 	}
7158cf11e85fSRoman Gushchin 
7159cf11e85fSRoman Gushchin 	reserved = 0;
716030a51400SPeng Liu 	for_each_online_node(nid) {
7161cf11e85fSRoman Gushchin 		int res;
71622281f797SBarry Song 		char name[CMA_MAX_NAME];
7163cf11e85fSRoman Gushchin 
716438e719abSBaolin Wang 		if (node_specific_cma_alloc) {
716538e719abSBaolin Wang 			if (hugetlb_cma_size_in_node[nid] == 0)
716638e719abSBaolin Wang 				continue;
716738e719abSBaolin Wang 
716838e719abSBaolin Wang 			size = hugetlb_cma_size_in_node[nid];
716938e719abSBaolin Wang 		} else {
7170cf11e85fSRoman Gushchin 			size = min(per_node, hugetlb_cma_size - reserved);
717138e719abSBaolin Wang 		}
717238e719abSBaolin Wang 
7173cf11e85fSRoman Gushchin 		size = round_up(size, PAGE_SIZE << order);
7174cf11e85fSRoman Gushchin 
71752281f797SBarry Song 		snprintf(name, sizeof(name), "hugetlb%d", nid);
7176a01f4390SMike Kravetz 		/*
7177a01f4390SMike Kravetz 		 * Note that 'order per bit' is based on smallest size that
7178a01f4390SMike Kravetz 		 * may be returned to CMA allocator in the case of
7179a01f4390SMike Kravetz 		 * huge page demotion.
7180a01f4390SMike Kravetz 		 */
7181a01f4390SMike Kravetz 		res = cma_declare_contiguous_nid(0, size, 0,
7182a01f4390SMike Kravetz 						PAGE_SIZE << HUGETLB_PAGE_ORDER,
718329d0f41dSBarry Song 						 0, false, name,
7184cf11e85fSRoman Gushchin 						 &hugetlb_cma[nid], nid);
7185cf11e85fSRoman Gushchin 		if (res) {
7186cf11e85fSRoman Gushchin 			pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
7187cf11e85fSRoman Gushchin 				res, nid);
7188cf11e85fSRoman Gushchin 			continue;
7189cf11e85fSRoman Gushchin 		}
7190cf11e85fSRoman Gushchin 
7191cf11e85fSRoman Gushchin 		reserved += size;
7192cf11e85fSRoman Gushchin 		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
7193cf11e85fSRoman Gushchin 			size / SZ_1M, nid);
7194cf11e85fSRoman Gushchin 
7195cf11e85fSRoman Gushchin 		if (reserved >= hugetlb_cma_size)
7196cf11e85fSRoman Gushchin 			break;
7197cf11e85fSRoman Gushchin 	}
7198a01f4390SMike Kravetz 
7199a01f4390SMike Kravetz 	if (!reserved)
7200a01f4390SMike Kravetz 		/*
7201a01f4390SMike Kravetz 		 * hugetlb_cma_size is used to determine if allocations from
7202a01f4390SMike Kravetz 		 * cma are possible.  Set to zero if no cma regions are set up.
7203a01f4390SMike Kravetz 		 */
7204a01f4390SMike Kravetz 		hugetlb_cma_size = 0;
7205cf11e85fSRoman Gushchin }
7206cf11e85fSRoman Gushchin 
7207cf11e85fSRoman Gushchin void __init hugetlb_cma_check(void)
7208cf11e85fSRoman Gushchin {
7209cf11e85fSRoman Gushchin 	if (!hugetlb_cma_size || cma_reserve_called)
7210cf11e85fSRoman Gushchin 		return;
7211cf11e85fSRoman Gushchin 
7212cf11e85fSRoman Gushchin 	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
7213cf11e85fSRoman Gushchin }
7214cf11e85fSRoman Gushchin 
7215cf11e85fSRoman Gushchin #endif /* CONFIG_CMA */
7216