xref: /openbmc/linux/mm/hugetlb.c (revision 14455eab)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Generic hugetlb support.
46d49e352SNadia Yvette Chambers  * (C) Nadia Yvette Chambers, April 2004
51da177e4SLinus Torvalds  */
61da177e4SLinus Torvalds #include <linux/list.h>
71da177e4SLinus Torvalds #include <linux/init.h>
81da177e4SLinus Torvalds #include <linux/mm.h>
9e1759c21SAlexey Dobriyan #include <linux/seq_file.h>
101da177e4SLinus Torvalds #include <linux/sysctl.h>
111da177e4SLinus Torvalds #include <linux/highmem.h>
12cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h>
131da177e4SLinus Torvalds #include <linux/nodemask.h>
1463551ae0SDavid Gibson #include <linux/pagemap.h>
155da7ca86SChristoph Lameter #include <linux/mempolicy.h>
163b32123dSGideon Israel Dsouza #include <linux/compiler.h>
17aea47ff3SChristoph Lameter #include <linux/cpuset.h>
183935baa9SDavid Gibson #include <linux/mutex.h>
1997ad1087SMike Rapoport #include <linux/memblock.h>
20a3437870SNishanth Aravamudan #include <linux/sysfs.h>
215a0e3ad6STejun Heo #include <linux/slab.h>
22bbe88753SJoonsoo Kim #include <linux/sched/mm.h>
2363489f8eSMike Kravetz #include <linux/mmdebug.h>
24174cd4b1SIngo Molnar #include <linux/sched/signal.h>
250fe6e20bSNaoya Horiguchi #include <linux/rmap.h>
26c6247f72SMatthew Wilcox #include <linux/string_helpers.h>
27fd6a03edSNaoya Horiguchi #include <linux/swap.h>
28fd6a03edSNaoya Horiguchi #include <linux/swapops.h>
298382d914SDavidlohr Bueso #include <linux/jhash.h>
3098fa15f3SAnshuman Khandual #include <linux/numa.h>
31c77c0a8aSWaiman Long #include <linux/llist.h>
32cf11e85fSRoman Gushchin #include <linux/cma.h>
338cc5fcbbSMina Almasry #include <linux/migrate.h>
34f9317f77SMike Kravetz #include <linux/nospec.h>
35662ce1dcSYang Yang #include <linux/delayacct.h>
36d6606683SLinus Torvalds 
3763551ae0SDavid Gibson #include <asm/page.h>
38ca15ca40SMike Rapoport #include <asm/pgalloc.h>
3924669e58SAneesh Kumar K.V #include <asm/tlb.h>
4063551ae0SDavid Gibson 
4124669e58SAneesh Kumar K.V #include <linux/io.h>
4263551ae0SDavid Gibson #include <linux/hugetlb.h>
439dd540e2SAneesh Kumar K.V #include <linux/hugetlb_cgroup.h>
449a305230SLee Schermerhorn #include <linux/node.h>
45ab5ac90aSMichal Hocko #include <linux/page_owner.h>
467835e98bSNick Piggin #include "internal.h"
47f41f2ed4SMuchun Song #include "hugetlb_vmemmap.h"
481da177e4SLinus Torvalds 
49c3f38a38SAneesh Kumar K.V int hugetlb_max_hstate __read_mostly;
50e5ff2159SAndi Kleen unsigned int default_hstate_idx;
51e5ff2159SAndi Kleen struct hstate hstates[HUGE_MAX_HSTATE];
52cf11e85fSRoman Gushchin 
53dbda8feaSBarry Song #ifdef CONFIG_CMA
54cf11e85fSRoman Gushchin static struct cma *hugetlb_cma[MAX_NUMNODES];
5538e719abSBaolin Wang static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
56a01f4390SMike Kravetz static bool hugetlb_cma_page(struct page *page, unsigned int order)
57a01f4390SMike Kravetz {
58a01f4390SMike Kravetz 	return cma_pages_valid(hugetlb_cma[page_to_nid(page)], page,
59a01f4390SMike Kravetz 				1 << order);
60a01f4390SMike Kravetz }
61a01f4390SMike Kravetz #else
62a01f4390SMike Kravetz static bool hugetlb_cma_page(struct page *page, unsigned int order)
63a01f4390SMike Kravetz {
64a01f4390SMike Kravetz 	return false;
65a01f4390SMike Kravetz }
66dbda8feaSBarry Song #endif
67dbda8feaSBarry Song static unsigned long hugetlb_cma_size __initdata;
68cf11e85fSRoman Gushchin 
6953ba51d2SJon Tollefson __initdata LIST_HEAD(huge_boot_pages);
7053ba51d2SJon Tollefson 
71e5ff2159SAndi Kleen /* for command line parsing */
72e5ff2159SAndi Kleen static struct hstate * __initdata parsed_hstate;
73e5ff2159SAndi Kleen static unsigned long __initdata default_hstate_max_huge_pages;
749fee021dSVaishali Thakkar static bool __initdata parsed_valid_hugepagesz = true;
75282f4214SMike Kravetz static bool __initdata parsed_default_hugepagesz;
76b5389086SZhenguo Yao static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
77e5ff2159SAndi Kleen 
783935baa9SDavid Gibson /*
7931caf665SNaoya Horiguchi  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
8031caf665SNaoya Horiguchi  * free_huge_pages, and surplus_huge_pages.
813935baa9SDavid Gibson  */
82c3f38a38SAneesh Kumar K.V DEFINE_SPINLOCK(hugetlb_lock);
830bd0f9fbSEric Paris 
848382d914SDavidlohr Bueso /*
858382d914SDavidlohr Bueso  * Serializes faults on the same logical page.  This is used to
868382d914SDavidlohr Bueso  * prevent spurious OOMs when the hugepage pool is fully utilized.
878382d914SDavidlohr Bueso  */
888382d914SDavidlohr Bueso static int num_fault_mutexes;
89c672c7f2SMike Kravetz struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
908382d914SDavidlohr Bueso 
917ca02d0aSMike Kravetz /* Forward declaration */
927ca02d0aSMike Kravetz static int hugetlb_acct_memory(struct hstate *h, long delta);
937ca02d0aSMike Kravetz 
941d88433bSMiaohe Lin static inline bool subpool_is_free(struct hugepage_subpool *spool)
951d88433bSMiaohe Lin {
961d88433bSMiaohe Lin 	if (spool->count)
971d88433bSMiaohe Lin 		return false;
981d88433bSMiaohe Lin 	if (spool->max_hpages != -1)
991d88433bSMiaohe Lin 		return spool->used_hpages == 0;
1001d88433bSMiaohe Lin 	if (spool->min_hpages != -1)
1011d88433bSMiaohe Lin 		return spool->rsv_hpages == spool->min_hpages;
1021d88433bSMiaohe Lin 
1031d88433bSMiaohe Lin 	return true;
1041d88433bSMiaohe Lin }
1051d88433bSMiaohe Lin 
106db71ef79SMike Kravetz static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
107db71ef79SMike Kravetz 						unsigned long irq_flags)
10890481622SDavid Gibson {
109db71ef79SMike Kravetz 	spin_unlock_irqrestore(&spool->lock, irq_flags);
11090481622SDavid Gibson 
11190481622SDavid Gibson 	/* If no pages are used, and no other handles to the subpool
1127c8de358SEthon Paul 	 * remain, give up any reservations based on minimum size and
1137ca02d0aSMike Kravetz 	 * free the subpool */
1141d88433bSMiaohe Lin 	if (subpool_is_free(spool)) {
1157ca02d0aSMike Kravetz 		if (spool->min_hpages != -1)
1167ca02d0aSMike Kravetz 			hugetlb_acct_memory(spool->hstate,
1177ca02d0aSMike Kravetz 						-spool->min_hpages);
11890481622SDavid Gibson 		kfree(spool);
11990481622SDavid Gibson 	}
1207ca02d0aSMike Kravetz }
12190481622SDavid Gibson 
1227ca02d0aSMike Kravetz struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
1237ca02d0aSMike Kravetz 						long min_hpages)
12490481622SDavid Gibson {
12590481622SDavid Gibson 	struct hugepage_subpool *spool;
12690481622SDavid Gibson 
127c6a91820SMike Kravetz 	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
12890481622SDavid Gibson 	if (!spool)
12990481622SDavid Gibson 		return NULL;
13090481622SDavid Gibson 
13190481622SDavid Gibson 	spin_lock_init(&spool->lock);
13290481622SDavid Gibson 	spool->count = 1;
1337ca02d0aSMike Kravetz 	spool->max_hpages = max_hpages;
1347ca02d0aSMike Kravetz 	spool->hstate = h;
1357ca02d0aSMike Kravetz 	spool->min_hpages = min_hpages;
1367ca02d0aSMike Kravetz 
1377ca02d0aSMike Kravetz 	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
1387ca02d0aSMike Kravetz 		kfree(spool);
1397ca02d0aSMike Kravetz 		return NULL;
1407ca02d0aSMike Kravetz 	}
1417ca02d0aSMike Kravetz 	spool->rsv_hpages = min_hpages;
14290481622SDavid Gibson 
14390481622SDavid Gibson 	return spool;
14490481622SDavid Gibson }
14590481622SDavid Gibson 
14690481622SDavid Gibson void hugepage_put_subpool(struct hugepage_subpool *spool)
14790481622SDavid Gibson {
148db71ef79SMike Kravetz 	unsigned long flags;
149db71ef79SMike Kravetz 
150db71ef79SMike Kravetz 	spin_lock_irqsave(&spool->lock, flags);
15190481622SDavid Gibson 	BUG_ON(!spool->count);
15290481622SDavid Gibson 	spool->count--;
153db71ef79SMike Kravetz 	unlock_or_release_subpool(spool, flags);
15490481622SDavid Gibson }
15590481622SDavid Gibson 
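/*
 * Usage sketch (illustrative only, not code from this file): a filesystem
 * such as hugetlbfs typically creates one subpool per mount and drops its
 * reference when the superblock goes away, roughly:
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 *
 * A max_hpages or min_hpages value of -1 means "no maximum" / "no minimum"
 * size constraint for the subpool.
 */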
1561c5ecae3SMike Kravetz /*
1571c5ecae3SMike Kravetz  * Subpool accounting for allocating and reserving pages.
1581c5ecae3SMike Kravetz  * Return -ENOMEM if there are not enough resources to satisfy the
1599e7ee400SRandy Dunlap  * request.  Otherwise, return the number of pages by which the
1601c5ecae3SMike Kravetz  * global pools must be adjusted (upward).  The returned value may
1611c5ecae3SMike Kravetz  * only be different from the passed value (delta) in the case where
1627c8de358SEthon Paul  * a subpool minimum size must be maintained.
1631c5ecae3SMike Kravetz  */
1641c5ecae3SMike Kravetz static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
16590481622SDavid Gibson 				      long delta)
16690481622SDavid Gibson {
1671c5ecae3SMike Kravetz 	long ret = delta;
16890481622SDavid Gibson 
16990481622SDavid Gibson 	if (!spool)
1701c5ecae3SMike Kravetz 		return ret;
17190481622SDavid Gibson 
172db71ef79SMike Kravetz 	spin_lock_irq(&spool->lock);
17390481622SDavid Gibson 
1741c5ecae3SMike Kravetz 	if (spool->max_hpages != -1) {		/* maximum size accounting */
1751c5ecae3SMike Kravetz 		if ((spool->used_hpages + delta) <= spool->max_hpages)
1761c5ecae3SMike Kravetz 			spool->used_hpages += delta;
1771c5ecae3SMike Kravetz 		else {
1781c5ecae3SMike Kravetz 			ret = -ENOMEM;
1791c5ecae3SMike Kravetz 			goto unlock_ret;
1801c5ecae3SMike Kravetz 		}
1811c5ecae3SMike Kravetz 	}
1821c5ecae3SMike Kravetz 
18309a95e29SMike Kravetz 	/* minimum size accounting */
18409a95e29SMike Kravetz 	if (spool->min_hpages != -1 && spool->rsv_hpages) {
1851c5ecae3SMike Kravetz 		if (delta > spool->rsv_hpages) {
1861c5ecae3SMike Kravetz 			/*
1871c5ecae3SMike Kravetz 			 * Asking for more reserves than those already taken on
1881c5ecae3SMike Kravetz 			 * behalf of subpool.  Return difference.
1891c5ecae3SMike Kravetz 			 */
1901c5ecae3SMike Kravetz 			ret = delta - spool->rsv_hpages;
1911c5ecae3SMike Kravetz 			spool->rsv_hpages = 0;
1921c5ecae3SMike Kravetz 		} else {
1931c5ecae3SMike Kravetz 			ret = 0;	/* reserves already accounted for */
1941c5ecae3SMike Kravetz 			spool->rsv_hpages -= delta;
1951c5ecae3SMike Kravetz 		}
1961c5ecae3SMike Kravetz 	}
1971c5ecae3SMike Kravetz 
1981c5ecae3SMike Kravetz unlock_ret:
199db71ef79SMike Kravetz 	spin_unlock_irq(&spool->lock);
20090481622SDavid Gibson 	return ret;
20190481622SDavid Gibson }
20290481622SDavid Gibson 
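/*
 * Worked example (illustrative): assume a subpool with max_hpages = 10,
 * min_hpages = 4, used_hpages = 0 and rsv_hpages = 4.  A call asking for
 * delta = 6 pages succeeds: used_hpages becomes 6, and since 6 > rsv_hpages
 * the reserve is exhausted (rsv_hpages = 0) and the function returns 2,
 * i.e. only 2 of the 6 pages must additionally come from the global pool.
 */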
2031c5ecae3SMike Kravetz /*
2041c5ecae3SMike Kravetz  * Subpool accounting for freeing and unreserving pages.
2051c5ecae3SMike Kravetz  * The return value may only be different from the passed value (delta)
2061c5ecae3SMike Kravetz  * The return value may only be different than the passed value (delta)
2071c5ecae3SMike Kravetz  * in the case where a subpool minimum size must be maintained.
2081c5ecae3SMike Kravetz  */
2091c5ecae3SMike Kravetz static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
21090481622SDavid Gibson 				       long delta)
21190481622SDavid Gibson {
2121c5ecae3SMike Kravetz 	long ret = delta;
213db71ef79SMike Kravetz 	unsigned long flags;
2141c5ecae3SMike Kravetz 
21590481622SDavid Gibson 	if (!spool)
2161c5ecae3SMike Kravetz 		return delta;
21790481622SDavid Gibson 
218db71ef79SMike Kravetz 	spin_lock_irqsave(&spool->lock, flags);
2191c5ecae3SMike Kravetz 
2201c5ecae3SMike Kravetz 	if (spool->max_hpages != -1)		/* maximum size accounting */
22190481622SDavid Gibson 		spool->used_hpages -= delta;
2221c5ecae3SMike Kravetz 
22309a95e29SMike Kravetz 	 /* minimum size accounting */
22409a95e29SMike Kravetz 	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
2251c5ecae3SMike Kravetz 		if (spool->rsv_hpages + delta <= spool->min_hpages)
2261c5ecae3SMike Kravetz 			ret = 0;
2271c5ecae3SMike Kravetz 		else
2281c5ecae3SMike Kravetz 			ret = spool->rsv_hpages + delta - spool->min_hpages;
2291c5ecae3SMike Kravetz 
2301c5ecae3SMike Kravetz 		spool->rsv_hpages += delta;
2311c5ecae3SMike Kravetz 		if (spool->rsv_hpages > spool->min_hpages)
2321c5ecae3SMike Kravetz 			spool->rsv_hpages = spool->min_hpages;
2331c5ecae3SMike Kravetz 	}
2341c5ecae3SMike Kravetz 
2351c5ecae3SMike Kravetz 	/*
2361c5ecae3SMike Kravetz 	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
2371c5ecae3SMike Kravetz 	 * quota reference, free it now.
2381c5ecae3SMike Kravetz 	 */
239db71ef79SMike Kravetz 	unlock_or_release_subpool(spool, flags);
2401c5ecae3SMike Kravetz 
2411c5ecae3SMike Kravetz 	return ret;
24290481622SDavid Gibson }
24390481622SDavid Gibson 
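/*
 * Worked example (illustrative), continuing the one above: with min_hpages = 4,
 * used_hpages = 6 and rsv_hpages = 0, putting back delta = 6 pages leaves
 * used_hpages = 0.  Since rsv_hpages + delta (6) exceeds min_hpages (4), the
 * function returns 2 global reservations to drop and caps rsv_hpages back at
 * the subpool minimum of 4.
 */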
24490481622SDavid Gibson static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
24590481622SDavid Gibson {
24690481622SDavid Gibson 	return HUGETLBFS_SB(inode->i_sb)->spool;
24790481622SDavid Gibson }
24890481622SDavid Gibson 
24990481622SDavid Gibson static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
25090481622SDavid Gibson {
251496ad9aaSAl Viro 	return subpool_inode(file_inode(vma->vm_file));
25290481622SDavid Gibson }
25390481622SDavid Gibson 
2540db9d74eSMina Almasry /* Helper that removes a struct file_region from the resv_map cache and returns
2550db9d74eSMina Almasry  * it for use.
2560db9d74eSMina Almasry  */
2570db9d74eSMina Almasry static struct file_region *
2580db9d74eSMina Almasry get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
2590db9d74eSMina Almasry {
2600db9d74eSMina Almasry 	struct file_region *nrg = NULL;
2610db9d74eSMina Almasry 
2620db9d74eSMina Almasry 	VM_BUG_ON(resv->region_cache_count <= 0);
2630db9d74eSMina Almasry 
2640db9d74eSMina Almasry 	resv->region_cache_count--;
2650db9d74eSMina Almasry 	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
2660db9d74eSMina Almasry 	list_del(&nrg->link);
2670db9d74eSMina Almasry 
2680db9d74eSMina Almasry 	nrg->from = from;
2690db9d74eSMina Almasry 	nrg->to = to;
2700db9d74eSMina Almasry 
2710db9d74eSMina Almasry 	return nrg;
2720db9d74eSMina Almasry }
2730db9d74eSMina Almasry 
274075a61d0SMina Almasry static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
275075a61d0SMina Almasry 					      struct file_region *rg)
276075a61d0SMina Almasry {
277075a61d0SMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
278075a61d0SMina Almasry 	nrg->reservation_counter = rg->reservation_counter;
279075a61d0SMina Almasry 	nrg->css = rg->css;
280075a61d0SMina Almasry 	if (rg->css)
281075a61d0SMina Almasry 		css_get(rg->css);
282075a61d0SMina Almasry #endif
283075a61d0SMina Almasry }
284075a61d0SMina Almasry 
285075a61d0SMina Almasry /* Helper that records hugetlb_cgroup uncharge info. */
286075a61d0SMina Almasry static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
287075a61d0SMina Almasry 						struct hstate *h,
288075a61d0SMina Almasry 						struct resv_map *resv,
289075a61d0SMina Almasry 						struct file_region *nrg)
290075a61d0SMina Almasry {
291075a61d0SMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
292075a61d0SMina Almasry 	if (h_cg) {
293075a61d0SMina Almasry 		nrg->reservation_counter =
294075a61d0SMina Almasry 			&h_cg->rsvd_hugepage[hstate_index(h)];
295075a61d0SMina Almasry 		nrg->css = &h_cg->css;
296d85aecf2SMiaohe Lin 		/*
297d85aecf2SMiaohe Lin 		 * The caller will hold exactly one h_cg->css reference for the
298d85aecf2SMiaohe Lin 		 * whole contiguous reservation region. But this area might be
299d85aecf2SMiaohe Lin 		 * scattered when there are already some file_regions residing in
300d85aecf2SMiaohe Lin 		 * it. As a result, many file_regions may share only one css
301d85aecf2SMiaohe Lin 		 * reference. In order to ensure that one file_region must hold
302d85aecf2SMiaohe Lin 		 * exactly one h_cg->css reference, we should do css_get for
303d85aecf2SMiaohe Lin 		 * each file_region and leave the reference held by caller
304d85aecf2SMiaohe Lin 		 * untouched.
305d85aecf2SMiaohe Lin 		 */
306d85aecf2SMiaohe Lin 		css_get(&h_cg->css);
307075a61d0SMina Almasry 		if (!resv->pages_per_hpage)
308075a61d0SMina Almasry 			resv->pages_per_hpage = pages_per_huge_page(h);
309075a61d0SMina Almasry 		/* pages_per_hpage should be the same for all entries in
310075a61d0SMina Almasry 		 * a resv_map.
311075a61d0SMina Almasry 		 */
312075a61d0SMina Almasry 		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
313075a61d0SMina Almasry 	} else {
314075a61d0SMina Almasry 		nrg->reservation_counter = NULL;
315075a61d0SMina Almasry 		nrg->css = NULL;
316075a61d0SMina Almasry 	}
317075a61d0SMina Almasry #endif
318075a61d0SMina Almasry }
319075a61d0SMina Almasry 
320d85aecf2SMiaohe Lin static void put_uncharge_info(struct file_region *rg)
321d85aecf2SMiaohe Lin {
322d85aecf2SMiaohe Lin #ifdef CONFIG_CGROUP_HUGETLB
323d85aecf2SMiaohe Lin 	if (rg->css)
324d85aecf2SMiaohe Lin 		css_put(rg->css);
325d85aecf2SMiaohe Lin #endif
326d85aecf2SMiaohe Lin }
327d85aecf2SMiaohe Lin 
328a9b3f867SMina Almasry static bool has_same_uncharge_info(struct file_region *rg,
329a9b3f867SMina Almasry 				   struct file_region *org)
330a9b3f867SMina Almasry {
331a9b3f867SMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
3320739eb43SBaolin Wang 	return rg->reservation_counter == org->reservation_counter &&
333a9b3f867SMina Almasry 	       rg->css == org->css;
334a9b3f867SMina Almasry 
335a9b3f867SMina Almasry #else
336a9b3f867SMina Almasry 	return true;
337a9b3f867SMina Almasry #endif
338a9b3f867SMina Almasry }
339a9b3f867SMina Almasry 
340a9b3f867SMina Almasry static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
341a9b3f867SMina Almasry {
342a9b3f867SMina Almasry 	struct file_region *nrg = NULL, *prg = NULL;
343a9b3f867SMina Almasry 
344a9b3f867SMina Almasry 	prg = list_prev_entry(rg, link);
345a9b3f867SMina Almasry 	if (&prg->link != &resv->regions && prg->to == rg->from &&
346a9b3f867SMina Almasry 	    has_same_uncharge_info(prg, rg)) {
347a9b3f867SMina Almasry 		prg->to = rg->to;
348a9b3f867SMina Almasry 
349a9b3f867SMina Almasry 		list_del(&rg->link);
350d85aecf2SMiaohe Lin 		put_uncharge_info(rg);
351a9b3f867SMina Almasry 		kfree(rg);
352a9b3f867SMina Almasry 
3537db5e7b6SWei Yang 		rg = prg;
354a9b3f867SMina Almasry 	}
355a9b3f867SMina Almasry 
356a9b3f867SMina Almasry 	nrg = list_next_entry(rg, link);
357a9b3f867SMina Almasry 	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
358a9b3f867SMina Almasry 	    has_same_uncharge_info(nrg, rg)) {
359a9b3f867SMina Almasry 		nrg->from = rg->from;
360a9b3f867SMina Almasry 
361a9b3f867SMina Almasry 		list_del(&rg->link);
362d85aecf2SMiaohe Lin 		put_uncharge_info(rg);
363a9b3f867SMina Almasry 		kfree(rg);
364a9b3f867SMina Almasry 	}
365a9b3f867SMina Almasry }
366a9b3f867SMina Almasry 
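/*
 * Example (illustrative): if the reserve map contains [0, 2) and [2, 5) with
 * identical uncharge info, calling coalesce_file_region() on the second entry
 * merges the two into a single [0, 5) region and releases the redundant
 * file_region (and its css reference).
 */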
3672103cf9cSPeter Xu static inline long
36884448c8eSJakob Koschel hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
3692103cf9cSPeter Xu 		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
3702103cf9cSPeter Xu 		     long *regions_needed)
3712103cf9cSPeter Xu {
3722103cf9cSPeter Xu 	struct file_region *nrg;
3732103cf9cSPeter Xu 
3742103cf9cSPeter Xu 	if (!regions_needed) {
3752103cf9cSPeter Xu 		nrg = get_file_region_entry_from_cache(map, from, to);
3762103cf9cSPeter Xu 		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
37784448c8eSJakob Koschel 		list_add(&nrg->link, rg);
3782103cf9cSPeter Xu 		coalesce_file_region(map, nrg);
3792103cf9cSPeter Xu 	} else
3802103cf9cSPeter Xu 		*regions_needed += 1;
3812103cf9cSPeter Xu 
3822103cf9cSPeter Xu 	return to - from;
3832103cf9cSPeter Xu }
3842103cf9cSPeter Xu 
385972a3da3SWei Yang /*
386972a3da3SWei Yang  * Must be called with resv->lock held.
387972a3da3SWei Yang  *
388972a3da3SWei Yang  * Calling this with regions_needed != NULL will count the number of pages
389972a3da3SWei Yang  * to be added but will not modify the linked list. In that case, regions_needed
390972a3da3SWei Yang  * will indicate the number of file_regions needed in the cache to carry out
391972a3da3SWei Yang  * the addition of regions for this range.
392d75c6af9SMina Almasry  */
393d75c6af9SMina Almasry static long add_reservation_in_range(struct resv_map *resv, long f, long t,
394075a61d0SMina Almasry 				     struct hugetlb_cgroup *h_cg,
395972a3da3SWei Yang 				     struct hstate *h, long *regions_needed)
396d75c6af9SMina Almasry {
3970db9d74eSMina Almasry 	long add = 0;
398d75c6af9SMina Almasry 	struct list_head *head = &resv->regions;
3990db9d74eSMina Almasry 	long last_accounted_offset = f;
40084448c8eSJakob Koschel 	struct file_region *iter, *trg = NULL;
40184448c8eSJakob Koschel 	struct list_head *rg = NULL;
402d75c6af9SMina Almasry 
4030db9d74eSMina Almasry 	if (regions_needed)
4040db9d74eSMina Almasry 		*regions_needed = 0;
405d75c6af9SMina Almasry 
4060db9d74eSMina Almasry 	/* In this loop, we essentially handle an entry for the range
40784448c8eSJakob Koschel 	 * [last_accounted_offset, iter->from), at every iteration, with some
4080db9d74eSMina Almasry 	 * bounds checking.
4090db9d74eSMina Almasry 	 */
41084448c8eSJakob Koschel 	list_for_each_entry_safe(iter, trg, head, link) {
4110db9d74eSMina Almasry 		/* Skip irrelevant regions that start before our range. */
41284448c8eSJakob Koschel 		if (iter->from < f) {
4130db9d74eSMina Almasry 			/* If this region ends after the last accounted offset,
4140db9d74eSMina Almasry 			 * then we need to update last_accounted_offset.
4150db9d74eSMina Almasry 			 */
41684448c8eSJakob Koschel 			if (iter->to > last_accounted_offset)
41784448c8eSJakob Koschel 				last_accounted_offset = iter->to;
4180db9d74eSMina Almasry 			continue;
4190db9d74eSMina Almasry 		}
420d75c6af9SMina Almasry 
4210db9d74eSMina Almasry 		/* When we find a region that starts beyond our range, we've
4220db9d74eSMina Almasry 		 * finished.
4230db9d74eSMina Almasry 		 */
42484448c8eSJakob Koschel 		if (iter->from >= t) {
42584448c8eSJakob Koschel 			rg = iter->link.prev;
426d75c6af9SMina Almasry 			break;
42784448c8eSJakob Koschel 		}
428d75c6af9SMina Almasry 
42984448c8eSJakob Koschel 		/* Add an entry for last_accounted_offset -> iter->from, and
4300db9d74eSMina Almasry 		 * update last_accounted_offset.
431d75c6af9SMina Almasry 		 */
43284448c8eSJakob Koschel 		if (iter->from > last_accounted_offset)
43384448c8eSJakob Koschel 			add += hugetlb_resv_map_add(resv, iter->link.prev,
4342103cf9cSPeter Xu 						    last_accounted_offset,
43584448c8eSJakob Koschel 						    iter->from, h, h_cg,
4362103cf9cSPeter Xu 						    regions_needed);
437d75c6af9SMina Almasry 
43884448c8eSJakob Koschel 		last_accounted_offset = iter->to;
4390db9d74eSMina Almasry 	}
4400db9d74eSMina Almasry 
4410db9d74eSMina Almasry 	/* Handle the case where our range extends beyond
4420db9d74eSMina Almasry 	 * last_accounted_offset.
4430db9d74eSMina Almasry 	 */
44484448c8eSJakob Koschel 	if (!rg)
44584448c8eSJakob Koschel 		rg = head->prev;
4462103cf9cSPeter Xu 	if (last_accounted_offset < t)
4472103cf9cSPeter Xu 		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
4482103cf9cSPeter Xu 					    t, h, h_cg, regions_needed);
4490db9d74eSMina Almasry 
4500db9d74eSMina Almasry 	return add;
4510db9d74eSMina Almasry }
4520db9d74eSMina Almasry 
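/*
 * Example (illustrative): with existing regions [2, 4) and [7, 9), a call for
 * the range f = 0, t = 10 accounts for the gaps [0, 2), [4, 7) and [9, 10)
 * and returns 6.  If regions_needed is non-NULL, the list is left untouched
 * and *regions_needed is set to 3, the number of file_region entries a later
 * region_add() would consume from the cache.
 */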
4530db9d74eSMina Almasry /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
4540db9d74eSMina Almasry  */
4550db9d74eSMina Almasry static int allocate_file_region_entries(struct resv_map *resv,
4560db9d74eSMina Almasry 					int regions_needed)
4570db9d74eSMina Almasry 	__must_hold(&resv->lock)
4580db9d74eSMina Almasry {
45934665341SMiaohe Lin 	LIST_HEAD(allocated_regions);
4600db9d74eSMina Almasry 	int to_allocate = 0, i = 0;
4610db9d74eSMina Almasry 	struct file_region *trg = NULL, *rg = NULL;
4620db9d74eSMina Almasry 
4630db9d74eSMina Almasry 	VM_BUG_ON(regions_needed < 0);
4640db9d74eSMina Almasry 
4650db9d74eSMina Almasry 	/*
4660db9d74eSMina Almasry 	 * Check for sufficient descriptors in the cache to accommodate
4670db9d74eSMina Almasry 	 * the number of in progress add operations plus regions_needed.
4680db9d74eSMina Almasry 	 *
4690db9d74eSMina Almasry 	 * This is a while loop because when we drop the lock, some other call
4700db9d74eSMina Almasry 	 * to region_add or region_del may have consumed some region_entries,
4710db9d74eSMina Almasry 	 * so we keep looping here until we finally have enough entries for
4720db9d74eSMina Almasry 	 * (adds_in_progress + regions_needed).
4730db9d74eSMina Almasry 	 */
4740db9d74eSMina Almasry 	while (resv->region_cache_count <
4750db9d74eSMina Almasry 	       (resv->adds_in_progress + regions_needed)) {
4760db9d74eSMina Almasry 		to_allocate = resv->adds_in_progress + regions_needed -
4770db9d74eSMina Almasry 			      resv->region_cache_count;
4780db9d74eSMina Almasry 
4790db9d74eSMina Almasry 		/* At this point, we should have enough entries in the cache
480f0953a1bSIngo Molnar 		 * for all the existing adds_in_progress. We should only
4810db9d74eSMina Almasry 		 * need to allocate for regions_needed.
4820db9d74eSMina Almasry 		 */
4830db9d74eSMina Almasry 		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
4840db9d74eSMina Almasry 
4850db9d74eSMina Almasry 		spin_unlock(&resv->lock);
4860db9d74eSMina Almasry 		for (i = 0; i < to_allocate; i++) {
4870db9d74eSMina Almasry 			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
4880db9d74eSMina Almasry 			if (!trg)
4890db9d74eSMina Almasry 				goto out_of_memory;
4900db9d74eSMina Almasry 			list_add(&trg->link, &allocated_regions);
4910db9d74eSMina Almasry 		}
4920db9d74eSMina Almasry 
4930db9d74eSMina Almasry 		spin_lock(&resv->lock);
4940db9d74eSMina Almasry 
495d3ec7b6eSWei Yang 		list_splice(&allocated_regions, &resv->region_cache);
496d3ec7b6eSWei Yang 		resv->region_cache_count += to_allocate;
4970db9d74eSMina Almasry 	}
4980db9d74eSMina Almasry 
4990db9d74eSMina Almasry 	return 0;
5000db9d74eSMina Almasry 
5010db9d74eSMina Almasry out_of_memory:
5020db9d74eSMina Almasry 	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
503d75c6af9SMina Almasry 		list_del(&rg->link);
504d75c6af9SMina Almasry 		kfree(rg);
505d75c6af9SMina Almasry 	}
5060db9d74eSMina Almasry 	return -ENOMEM;
507d75c6af9SMina Almasry }
508d75c6af9SMina Almasry 
5091dd308a7SMike Kravetz /*
5101dd308a7SMike Kravetz  * Add the huge page range represented by [f, t) to the reserve
5110db9d74eSMina Almasry  * map.  Regions will be taken from the cache to fill in this range.
5120db9d74eSMina Almasry  * Sufficient regions should exist in the cache due to the previous
5130db9d74eSMina Almasry  * call to region_chg with the same range, but in some cases the cache will not
5140db9d74eSMina Almasry  * have sufficient entries due to races with other code doing region_add or
5150db9d74eSMina Almasry  * region_del.  The extra needed entries will be allocated.
516cf3ad20bSMike Kravetz  *
5170db9d74eSMina Almasry  * regions_needed is the out value provided by a previous call to region_chg.
5180db9d74eSMina Almasry  *
5190db9d74eSMina Almasry  * Return the number of new huge pages added to the map.  This number is greater
5200db9d74eSMina Almasry  * than or equal to zero.  If file_region entries needed to be allocated for
5217c8de358SEthon Paul  * this operation and we were not able to allocate, it returns -ENOMEM.
5220db9d74eSMina Almasry  * region_add of regions of length 1 never allocates file_regions and cannot
5230db9d74eSMina Almasry  * fail; region_chg will always allocate at least 1 entry and a region_add for
5240db9d74eSMina Almasry  * 1 page will only require at most 1 entry.
5251dd308a7SMike Kravetz  */
5260db9d74eSMina Almasry static long region_add(struct resv_map *resv, long f, long t,
527075a61d0SMina Almasry 		       long in_regions_needed, struct hstate *h,
528075a61d0SMina Almasry 		       struct hugetlb_cgroup *h_cg)
52996822904SAndy Whitcroft {
5300db9d74eSMina Almasry 	long add = 0, actual_regions_needed = 0;
53196822904SAndy Whitcroft 
5327b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
5330db9d74eSMina Almasry retry:
5340db9d74eSMina Almasry 
5350db9d74eSMina Almasry 	/* Count how many regions are actually needed to execute this add. */
536972a3da3SWei Yang 	add_reservation_in_range(resv, f, t, NULL, NULL,
537972a3da3SWei Yang 				 &actual_regions_needed);
53896822904SAndy Whitcroft 
5395e911373SMike Kravetz 	/*
5400db9d74eSMina Almasry 	 * Check for sufficient descriptors in the cache to accommodate
5410db9d74eSMina Almasry 	 * this add operation. Note that actual_regions_needed may be greater
5420db9d74eSMina Almasry 	 * than in_regions_needed, as the resv_map may have been modified since
5430db9d74eSMina Almasry 	 * the region_chg call. In this case, we need to make sure that we
5440db9d74eSMina Almasry 	 * allocate extra entries, such that we have enough for all the
5450db9d74eSMina Almasry 	 * existing adds_in_progress, plus the excess needed for this
5460db9d74eSMina Almasry 	 * operation.
5475e911373SMike Kravetz 	 */
5480db9d74eSMina Almasry 	if (actual_regions_needed > in_regions_needed &&
5490db9d74eSMina Almasry 	    resv->region_cache_count <
5500db9d74eSMina Almasry 		    resv->adds_in_progress +
5510db9d74eSMina Almasry 			    (actual_regions_needed - in_regions_needed)) {
5520db9d74eSMina Almasry 		/* region_add operation of range 1 should never need to
5530db9d74eSMina Almasry 		 * allocate file_region entries.
5540db9d74eSMina Almasry 		 */
5550db9d74eSMina Almasry 		VM_BUG_ON(t - f <= 1);
5565e911373SMike Kravetz 
5570db9d74eSMina Almasry 		if (allocate_file_region_entries(
5580db9d74eSMina Almasry 			    resv, actual_regions_needed - in_regions_needed)) {
5590db9d74eSMina Almasry 			return -ENOMEM;
5605e911373SMike Kravetz 		}
5615e911373SMike Kravetz 
5620db9d74eSMina Almasry 		goto retry;
5630db9d74eSMina Almasry 	}
564cf3ad20bSMike Kravetz 
565972a3da3SWei Yang 	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
5660db9d74eSMina Almasry 
5670db9d74eSMina Almasry 	resv->adds_in_progress -= in_regions_needed;
5680db9d74eSMina Almasry 
5697b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
570cf3ad20bSMike Kravetz 	return add;
57196822904SAndy Whitcroft }
57296822904SAndy Whitcroft 
5731dd308a7SMike Kravetz /*
5741dd308a7SMike Kravetz  * Examine the existing reserve map and determine how many
5751dd308a7SMike Kravetz  * huge pages in the specified range [f, t) are NOT currently
5761dd308a7SMike Kravetz  * represented.  This routine is called before a subsequent
5771dd308a7SMike Kravetz  * call to region_add that will actually modify the reserve
5781dd308a7SMike Kravetz  * map to add the specified range [f, t).  region_chg does
5791dd308a7SMike Kravetz  * not change the number of huge pages represented by the
5800db9d74eSMina Almasry  * map.  A number of new file_region structures are added to the cache as a
5810db9d74eSMina Almasry  * placeholder, for the subsequent region_add call to use. At least 1
5820db9d74eSMina Almasry  * file_region structure is added.
5830db9d74eSMina Almasry  *
5840db9d74eSMina Almasry  * out_regions_needed is the number of regions added to the
5850db9d74eSMina Almasry  * resv->adds_in_progress.  This value needs to be provided to a follow up call
5860db9d74eSMina Almasry  * to region_add or region_abort for proper accounting.
5875e911373SMike Kravetz  *
5885e911373SMike Kravetz  * Returns the number of huge pages that need to be added to the existing
5895e911373SMike Kravetz  * reservation map for the range [f, t).  This number is greater than or equal
5905e911373SMike Kravetz  * to zero.  -ENOMEM is returned if a new file_region structure or cache entry
5915e911373SMike Kravetz  * is needed and cannot be allocated.
5921dd308a7SMike Kravetz  */
5930db9d74eSMina Almasry static long region_chg(struct resv_map *resv, long f, long t,
5940db9d74eSMina Almasry 		       long *out_regions_needed)
59596822904SAndy Whitcroft {
59696822904SAndy Whitcroft 	long chg = 0;
59796822904SAndy Whitcroft 
5987b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
5995e911373SMike Kravetz 
600972a3da3SWei Yang 	/* Count how many hugepages in this range are NOT represented. */
601075a61d0SMina Almasry 	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
602972a3da3SWei Yang 				       out_regions_needed);
6035e911373SMike Kravetz 
6040db9d74eSMina Almasry 	if (*out_regions_needed == 0)
6050db9d74eSMina Almasry 		*out_regions_needed = 1;
6065e911373SMike Kravetz 
6070db9d74eSMina Almasry 	if (allocate_file_region_entries(resv, *out_regions_needed))
6085e911373SMike Kravetz 		return -ENOMEM;
6095e911373SMike Kravetz 
6100db9d74eSMina Almasry 	resv->adds_in_progress += *out_regions_needed;
61196822904SAndy Whitcroft 
6127b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
61396822904SAndy Whitcroft 	return chg;
61496822904SAndy Whitcroft }
61596822904SAndy Whitcroft 
6161dd308a7SMike Kravetz /*
6175e911373SMike Kravetz  * Abort the in progress add operation.  The adds_in_progress field
6185e911373SMike Kravetz  * of the resv_map keeps track of the operations in progress between
6195e911373SMike Kravetz  * calls to region_chg and region_add.  Operations are sometimes
6205e911373SMike Kravetz  * aborted after the call to region_chg.  In such cases, region_abort
6210db9d74eSMina Almasry  * is called to decrement the adds_in_progress counter. regions_needed
6220db9d74eSMina Almasry  * is the value returned by the region_chg call; it is used to decrement
6230db9d74eSMina Almasry  * the adds_in_progress counter.
6245e911373SMike Kravetz  *
6255e911373SMike Kravetz  * NOTE: The range arguments [f, t) are not needed or used in this
6265e911373SMike Kravetz  * routine.  They are kept to make reading the calling code easier as
6275e911373SMike Kravetz  * arguments will match the associated region_chg call.
6285e911373SMike Kravetz  */
6290db9d74eSMina Almasry static void region_abort(struct resv_map *resv, long f, long t,
6300db9d74eSMina Almasry 			 long regions_needed)
6315e911373SMike Kravetz {
6325e911373SMike Kravetz 	spin_lock(&resv->lock);
6335e911373SMike Kravetz 	VM_BUG_ON(!resv->region_cache_count);
6340db9d74eSMina Almasry 	resv->adds_in_progress -= regions_needed;
6355e911373SMike Kravetz 	spin_unlock(&resv->lock);
6365e911373SMike Kravetz }
6375e911373SMike Kravetz 
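/*
 * Protocol sketch (illustrative of how callers elsewhere in this file pair
 * these primitives; error handling trimmed, and charging_failed, h and h_cg
 * stand in for whatever state the real caller tracks):
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	if (chg < 0)
 *		return chg;
 *	... charge cgroups / adjust global counters ...
 *	if (charging_failed)
 *		region_abort(resv, f, t, regions_needed);
 *	else
 *		region_add(resv, f, t, regions_needed, h, h_cg);
 */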
6385e911373SMike Kravetz /*
639feba16e2SMike Kravetz  * Delete the specified range [f, t) from the reserve map.  If the
640feba16e2SMike Kravetz  * t parameter is LONG_MAX, this indicates that ALL regions after f
641feba16e2SMike Kravetz  * should be deleted.  Locate the regions which intersect [f, t)
642feba16e2SMike Kravetz  * and either trim, delete or split the existing regions.
643feba16e2SMike Kravetz  *
644feba16e2SMike Kravetz  * Returns the number of huge pages deleted from the reserve map.
645feba16e2SMike Kravetz  * In the normal case, the return value is zero or more.  In the
646feba16e2SMike Kravetz  * case where a region must be split, a new region descriptor must
647feba16e2SMike Kravetz  * be allocated.  If the allocation fails, -ENOMEM will be returned.
648feba16e2SMike Kravetz  * NOTE: If the parameter t == LONG_MAX, then we will never split
649feba16e2SMike Kravetz  * a region and possibly return -ENOMEM.  Callers specifying
650feba16e2SMike Kravetz  * t == LONG_MAX do not need to check for -ENOMEM error.
6511dd308a7SMike Kravetz  */
652feba16e2SMike Kravetz static long region_del(struct resv_map *resv, long f, long t)
65396822904SAndy Whitcroft {
6541406ec9bSJoonsoo Kim 	struct list_head *head = &resv->regions;
65596822904SAndy Whitcroft 	struct file_region *rg, *trg;
656feba16e2SMike Kravetz 	struct file_region *nrg = NULL;
657feba16e2SMike Kravetz 	long del = 0;
65896822904SAndy Whitcroft 
659feba16e2SMike Kravetz retry:
6607b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
661feba16e2SMike Kravetz 	list_for_each_entry_safe(rg, trg, head, link) {
662dbe409e4SMike Kravetz 		/*
663dbe409e4SMike Kravetz 		 * Skip regions before the range to be deleted.  file_region
664dbe409e4SMike Kravetz 		 * ranges are normally of the form [from, to).  However, there
665dbe409e4SMike Kravetz 		 * may be a "placeholder" entry in the map which is of the form
666dbe409e4SMike Kravetz 		 * (from, to) with from == to.  Check for placeholder entries
667dbe409e4SMike Kravetz 		 * at the beginning of the range to be deleted.
668dbe409e4SMike Kravetz 		 */
669dbe409e4SMike Kravetz 		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
670feba16e2SMike Kravetz 			continue;
671dbe409e4SMike Kravetz 
672feba16e2SMike Kravetz 		if (rg->from >= t)
67396822904SAndy Whitcroft 			break;
67496822904SAndy Whitcroft 
675feba16e2SMike Kravetz 		if (f > rg->from && t < rg->to) { /* Must split region */
676feba16e2SMike Kravetz 			/*
677feba16e2SMike Kravetz 			 * Check for an entry in the cache before dropping
678feba16e2SMike Kravetz 			 * lock and attempting allocation.
679feba16e2SMike Kravetz 			 */
680feba16e2SMike Kravetz 			if (!nrg &&
681feba16e2SMike Kravetz 			    resv->region_cache_count > resv->adds_in_progress) {
682feba16e2SMike Kravetz 				nrg = list_first_entry(&resv->region_cache,
683feba16e2SMike Kravetz 							struct file_region,
684feba16e2SMike Kravetz 							link);
685feba16e2SMike Kravetz 				list_del(&nrg->link);
686feba16e2SMike Kravetz 				resv->region_cache_count--;
68796822904SAndy Whitcroft 			}
68896822904SAndy Whitcroft 
689feba16e2SMike Kravetz 			if (!nrg) {
690feba16e2SMike Kravetz 				spin_unlock(&resv->lock);
691feba16e2SMike Kravetz 				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
692feba16e2SMike Kravetz 				if (!nrg)
693feba16e2SMike Kravetz 					return -ENOMEM;
694feba16e2SMike Kravetz 				goto retry;
695feba16e2SMike Kravetz 			}
696feba16e2SMike Kravetz 
697feba16e2SMike Kravetz 			del += t - f;
69879aa925bSMike Kravetz 			hugetlb_cgroup_uncharge_file_region(
699d85aecf2SMiaohe Lin 				resv, rg, t - f, false);
700feba16e2SMike Kravetz 
701feba16e2SMike Kravetz 			/* New entry for end of split region */
702feba16e2SMike Kravetz 			nrg->from = t;
703feba16e2SMike Kravetz 			nrg->to = rg->to;
704075a61d0SMina Almasry 
705075a61d0SMina Almasry 			copy_hugetlb_cgroup_uncharge_info(nrg, rg);
706075a61d0SMina Almasry 
707feba16e2SMike Kravetz 			INIT_LIST_HEAD(&nrg->link);
708feba16e2SMike Kravetz 
709feba16e2SMike Kravetz 			/* Original entry is trimmed */
710feba16e2SMike Kravetz 			rg->to = f;
711feba16e2SMike Kravetz 
712feba16e2SMike Kravetz 			list_add(&nrg->link, &rg->link);
713feba16e2SMike Kravetz 			nrg = NULL;
71496822904SAndy Whitcroft 			break;
715feba16e2SMike Kravetz 		}
716feba16e2SMike Kravetz 
717feba16e2SMike Kravetz 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
718feba16e2SMike Kravetz 			del += rg->to - rg->from;
719075a61d0SMina Almasry 			hugetlb_cgroup_uncharge_file_region(resv, rg,
720d85aecf2SMiaohe Lin 							    rg->to - rg->from, true);
72196822904SAndy Whitcroft 			list_del(&rg->link);
72296822904SAndy Whitcroft 			kfree(rg);
723feba16e2SMike Kravetz 			continue;
72496822904SAndy Whitcroft 		}
7257b24d861SDavidlohr Bueso 
726feba16e2SMike Kravetz 		if (f <= rg->from) {	/* Trim beginning of region */
727075a61d0SMina Almasry 			hugetlb_cgroup_uncharge_file_region(resv, rg,
728d85aecf2SMiaohe Lin 							    t - rg->from, false);
729075a61d0SMina Almasry 
73079aa925bSMike Kravetz 			del += t - rg->from;
73179aa925bSMike Kravetz 			rg->from = t;
73279aa925bSMike Kravetz 		} else {		/* Trim end of region */
733075a61d0SMina Almasry 			hugetlb_cgroup_uncharge_file_region(resv, rg,
734d85aecf2SMiaohe Lin 							    rg->to - f, false);
73579aa925bSMike Kravetz 
73679aa925bSMike Kravetz 			del += rg->to - f;
73779aa925bSMike Kravetz 			rg->to = f;
738feba16e2SMike Kravetz 		}
739feba16e2SMike Kravetz 	}
740feba16e2SMike Kravetz 
7417b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
742feba16e2SMike Kravetz 	kfree(nrg);
743feba16e2SMike Kravetz 	return del;
74496822904SAndy Whitcroft }
74596822904SAndy Whitcroft 
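/*
 * Examples (illustrative) of the three cases above on a map holding [0, 10):
 *  - region_del(resv, 3, 6) must split, leaving [0, 3) and [6, 10), returns 3;
 *  - region_del(resv, 0, 5) trims the start, leaving [5, 10), returns 5;
 *  - region_del(resv, 0, LONG_MAX) removes the region entirely, returns 10,
 *    and by construction never needs to split, so it cannot fail.
 */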
7461dd308a7SMike Kravetz /*
747b5cec28dSMike Kravetz  * A rare out-of-memory error was encountered which prevented removal of
748b5cec28dSMike Kravetz  * the reserve map region for a page.  The huge page itself was freed
749b5cec28dSMike Kravetz  * and removed from the page cache.  This routine will adjust the subpool
750b5cec28dSMike Kravetz  * usage count, and the global reserve count if needed.  By incrementing
751b5cec28dSMike Kravetz  * these counts, the reserve map entry which could not be deleted will
752b5cec28dSMike Kravetz  * appear as a "reserved" entry instead of simply dangling with incorrect
753b5cec28dSMike Kravetz  * counts.
754b5cec28dSMike Kravetz  */
75572e2936cSzhong jiang void hugetlb_fix_reserve_counts(struct inode *inode)
756b5cec28dSMike Kravetz {
757b5cec28dSMike Kravetz 	struct hugepage_subpool *spool = subpool_inode(inode);
758b5cec28dSMike Kravetz 	long rsv_adjust;
759da56388cSMiaohe Lin 	bool reserved = false;
760b5cec28dSMike Kravetz 
761b5cec28dSMike Kravetz 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
762da56388cSMiaohe Lin 	if (rsv_adjust > 0) {
763b5cec28dSMike Kravetz 		struct hstate *h = hstate_inode(inode);
764b5cec28dSMike Kravetz 
765da56388cSMiaohe Lin 		if (!hugetlb_acct_memory(h, 1))
766da56388cSMiaohe Lin 			reserved = true;
767da56388cSMiaohe Lin 	} else if (!rsv_adjust) {
768da56388cSMiaohe Lin 		reserved = true;
769b5cec28dSMike Kravetz 	}
770da56388cSMiaohe Lin 
771da56388cSMiaohe Lin 	if (!reserved)
772da56388cSMiaohe Lin 		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
773b5cec28dSMike Kravetz }
774b5cec28dSMike Kravetz 
775b5cec28dSMike Kravetz /*
7761dd308a7SMike Kravetz  * Count and return the number of huge pages in the reserve map
7771dd308a7SMike Kravetz  * that intersect with the range [f, t).
7781dd308a7SMike Kravetz  */
7791406ec9bSJoonsoo Kim static long region_count(struct resv_map *resv, long f, long t)
78084afd99bSAndy Whitcroft {
7811406ec9bSJoonsoo Kim 	struct list_head *head = &resv->regions;
78284afd99bSAndy Whitcroft 	struct file_region *rg;
78384afd99bSAndy Whitcroft 	long chg = 0;
78484afd99bSAndy Whitcroft 
7857b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
78684afd99bSAndy Whitcroft 	/* Locate each segment we overlap with, and count that overlap. */
78784afd99bSAndy Whitcroft 	list_for_each_entry(rg, head, link) {
788f2135a4aSWang Sheng-Hui 		long seg_from;
789f2135a4aSWang Sheng-Hui 		long seg_to;
79084afd99bSAndy Whitcroft 
79184afd99bSAndy Whitcroft 		if (rg->to <= f)
79284afd99bSAndy Whitcroft 			continue;
79384afd99bSAndy Whitcroft 		if (rg->from >= t)
79484afd99bSAndy Whitcroft 			break;
79584afd99bSAndy Whitcroft 
79684afd99bSAndy Whitcroft 		seg_from = max(rg->from, f);
79784afd99bSAndy Whitcroft 		seg_to = min(rg->to, t);
79884afd99bSAndy Whitcroft 
79984afd99bSAndy Whitcroft 		chg += seg_to - seg_from;
80084afd99bSAndy Whitcroft 	}
8017b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
80284afd99bSAndy Whitcroft 
80384afd99bSAndy Whitcroft 	return chg;
80484afd99bSAndy Whitcroft }
80584afd99bSAndy Whitcroft 
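/*
 * Example (illustrative): with regions [0, 4) and [6, 10) in the map,
 * region_count(resv, 2, 8) overlaps [2, 4) and [6, 8) and returns 4.
 */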
80696822904SAndy Whitcroft /*
807e7c4b0bfSAndy Whitcroft  * Convert the address within this vma to the page offset within
808e7c4b0bfSAndy Whitcroft  * the mapping, in pagecache page units; huge pages here.
809e7c4b0bfSAndy Whitcroft  */
810a5516438SAndi Kleen static pgoff_t vma_hugecache_offset(struct hstate *h,
811a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long address)
812e7c4b0bfSAndy Whitcroft {
813a5516438SAndi Kleen 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
814a5516438SAndi Kleen 			(vma->vm_pgoff >> huge_page_order(h));
815e7c4b0bfSAndy Whitcroft }
816e7c4b0bfSAndy Whitcroft 
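/*
 * Worked example (illustrative, assuming 4K base pages and a 2MB huge page
 * size, i.e. huge_page_shift() == 21 and huge_page_order() == 9): for a vma
 * with vm_start = 0x40000000 and vm_pgoff = 1024 (4MB into the file), the
 * address vm_start + 4MB yields huge page offset (4MB >> 21) + (1024 >> 9)
 * = 2 + 2 = 4 within the mapping.
 */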
8170fe6e20bSNaoya Horiguchi pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
8180fe6e20bSNaoya Horiguchi 				     unsigned long address)
8190fe6e20bSNaoya Horiguchi {
8200fe6e20bSNaoya Horiguchi 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
8210fe6e20bSNaoya Horiguchi }
822dee41079SDan Williams EXPORT_SYMBOL_GPL(linear_hugepage_index);
8230fe6e20bSNaoya Horiguchi 
82484afd99bSAndy Whitcroft /*
82508fba699SMel Gorman  * Return the size of the pages allocated when backing a VMA. In the majority
82608fba699SMel Gorman  * of cases this will be the same size as that used by the page table entries.
82708fba699SMel Gorman  */
82808fba699SMel Gorman unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
82908fba699SMel Gorman {
83005ea8860SDan Williams 	if (vma->vm_ops && vma->vm_ops->pagesize)
83105ea8860SDan Williams 		return vma->vm_ops->pagesize(vma);
83208fba699SMel Gorman 	return PAGE_SIZE;
83308fba699SMel Gorman }
834f340ca0fSJoerg Roedel EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
83508fba699SMel Gorman 
83608fba699SMel Gorman /*
8373340289dSMel Gorman  * Return the page size being used by the MMU to back a VMA. In the majority
8383340289dSMel Gorman  * of cases, the page size used by the kernel matches the MMU size. On
83909135cc5SDan Williams  * architectures where it differs, an architecture-specific 'strong'
84009135cc5SDan Williams  * version of this symbol is required.
8413340289dSMel Gorman  */
84209135cc5SDan Williams __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
8433340289dSMel Gorman {
8443340289dSMel Gorman 	return vma_kernel_pagesize(vma);
8453340289dSMel Gorman }
8463340289dSMel Gorman 
8473340289dSMel Gorman /*
84884afd99bSAndy Whitcroft  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
84984afd99bSAndy Whitcroft  * bits of the reservation map pointer, which are always clear due to
85084afd99bSAndy Whitcroft  * alignment.
85184afd99bSAndy Whitcroft  */
85284afd99bSAndy Whitcroft #define HPAGE_RESV_OWNER    (1UL << 0)
85384afd99bSAndy Whitcroft #define HPAGE_RESV_UNMAPPED (1UL << 1)
85404f2cbe3SMel Gorman #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
85584afd99bSAndy Whitcroft 
856a1e78772SMel Gorman /*
857a1e78772SMel Gorman  * These helpers are used to track how many pages are reserved for
858a1e78772SMel Gorman  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
859a1e78772SMel Gorman  * is guaranteed to have its future faults succeed.
860a1e78772SMel Gorman  *
861a1e78772SMel Gorman  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
862a1e78772SMel Gorman  * the reserve counters are updated with the hugetlb_lock held. It is safe
863a1e78772SMel Gorman  * to reset the VMA at fork() time as it is not in use yet and there is no
864a1e78772SMel Gorman  * chance of the global counters getting corrupted as a result of the values.
86584afd99bSAndy Whitcroft  *
86684afd99bSAndy Whitcroft  * The private mapping reservation is represented in a subtly different
86784afd99bSAndy Whitcroft  * manner to a shared mapping.  A shared mapping has a region map associated
86884afd99bSAndy Whitcroft  * with the underlying file; this region map represents the backing file
86984afd99bSAndy Whitcroft  * pages which have ever had a reservation assigned, and it persists even
87084afd99bSAndy Whitcroft  * after the page is instantiated.  A private mapping has a region map
87184afd99bSAndy Whitcroft  * associated with the original mmap which is attached to all VMAs which
87284afd99bSAndy Whitcroft  * reference it; this region map represents those offsets which have consumed
87384afd99bSAndy Whitcroft  * a reservation, i.e. where pages have been instantiated.
874a1e78772SMel Gorman  */
875e7c4b0bfSAndy Whitcroft static unsigned long get_vma_private_data(struct vm_area_struct *vma)
876e7c4b0bfSAndy Whitcroft {
877e7c4b0bfSAndy Whitcroft 	return (unsigned long)vma->vm_private_data;
878e7c4b0bfSAndy Whitcroft }
879e7c4b0bfSAndy Whitcroft 
880e7c4b0bfSAndy Whitcroft static void set_vma_private_data(struct vm_area_struct *vma,
881e7c4b0bfSAndy Whitcroft 							unsigned long value)
882e7c4b0bfSAndy Whitcroft {
883e7c4b0bfSAndy Whitcroft 	vma->vm_private_data = (void *)value;
884e7c4b0bfSAndy Whitcroft }
885e7c4b0bfSAndy Whitcroft 
886e9fe92aeSMina Almasry static void
887e9fe92aeSMina Almasry resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
888e9fe92aeSMina Almasry 					  struct hugetlb_cgroup *h_cg,
889e9fe92aeSMina Almasry 					  struct hstate *h)
890e9fe92aeSMina Almasry {
891e9fe92aeSMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
892e9fe92aeSMina Almasry 	if (!h_cg || !h) {
893e9fe92aeSMina Almasry 		resv_map->reservation_counter = NULL;
894e9fe92aeSMina Almasry 		resv_map->pages_per_hpage = 0;
895e9fe92aeSMina Almasry 		resv_map->css = NULL;
896e9fe92aeSMina Almasry 	} else {
897e9fe92aeSMina Almasry 		resv_map->reservation_counter =
898e9fe92aeSMina Almasry 			&h_cg->rsvd_hugepage[hstate_index(h)];
899e9fe92aeSMina Almasry 		resv_map->pages_per_hpage = pages_per_huge_page(h);
900e9fe92aeSMina Almasry 		resv_map->css = &h_cg->css;
901e9fe92aeSMina Almasry 	}
902e9fe92aeSMina Almasry #endif
903e9fe92aeSMina Almasry }
904e9fe92aeSMina Almasry 
9059119a41eSJoonsoo Kim struct resv_map *resv_map_alloc(void)
90684afd99bSAndy Whitcroft {
90784afd99bSAndy Whitcroft 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
9085e911373SMike Kravetz 	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
9095e911373SMike Kravetz 
9105e911373SMike Kravetz 	if (!resv_map || !rg) {
9115e911373SMike Kravetz 		kfree(resv_map);
9125e911373SMike Kravetz 		kfree(rg);
91384afd99bSAndy Whitcroft 		return NULL;
9145e911373SMike Kravetz 	}
91584afd99bSAndy Whitcroft 
91684afd99bSAndy Whitcroft 	kref_init(&resv_map->refs);
9177b24d861SDavidlohr Bueso 	spin_lock_init(&resv_map->lock);
91884afd99bSAndy Whitcroft 	INIT_LIST_HEAD(&resv_map->regions);
91984afd99bSAndy Whitcroft 
9205e911373SMike Kravetz 	resv_map->adds_in_progress = 0;
921e9fe92aeSMina Almasry 	/*
922e9fe92aeSMina Almasry 	 * Initialize these to 0. On shared mappings, 0's here indicate these
923e9fe92aeSMina Almasry 	 * fields don't do cgroup accounting. On private mappings, these will be
924e9fe92aeSMina Almasry 	 * re-initialized to the proper values, to indicate that hugetlb cgroup
925e9fe92aeSMina Almasry 	 * reservations are to be un-charged from here.
926e9fe92aeSMina Almasry 	 */
927e9fe92aeSMina Almasry 	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
9285e911373SMike Kravetz 
9295e911373SMike Kravetz 	INIT_LIST_HEAD(&resv_map->region_cache);
9305e911373SMike Kravetz 	list_add(&rg->link, &resv_map->region_cache);
9315e911373SMike Kravetz 	resv_map->region_cache_count = 1;
9325e911373SMike Kravetz 
93384afd99bSAndy Whitcroft 	return resv_map;
93484afd99bSAndy Whitcroft }
93584afd99bSAndy Whitcroft 
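/*
 * Lifetime sketch (illustrative): resv_map_alloc() returns a map holding one
 * kref.  Additional users take references with kref_get(&resv_map->refs), and
 * each reference is dropped with
 *	kref_put(&resv_map->refs, resv_map_release);
 * so resv_map_release() below runs only when the last reference goes away.
 */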
9369119a41eSJoonsoo Kim void resv_map_release(struct kref *ref)
93784afd99bSAndy Whitcroft {
93884afd99bSAndy Whitcroft 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
9395e911373SMike Kravetz 	struct list_head *head = &resv_map->region_cache;
9405e911373SMike Kravetz 	struct file_region *rg, *trg;
94184afd99bSAndy Whitcroft 
94284afd99bSAndy Whitcroft 	/* Clear out any active regions before we release the map. */
943feba16e2SMike Kravetz 	region_del(resv_map, 0, LONG_MAX);
9445e911373SMike Kravetz 
9455e911373SMike Kravetz 	/* ... and any entries left in the cache */
9465e911373SMike Kravetz 	list_for_each_entry_safe(rg, trg, head, link) {
9475e911373SMike Kravetz 		list_del(&rg->link);
9485e911373SMike Kravetz 		kfree(rg);
9495e911373SMike Kravetz 	}
9505e911373SMike Kravetz 
9515e911373SMike Kravetz 	VM_BUG_ON(resv_map->adds_in_progress);
9525e911373SMike Kravetz 
95384afd99bSAndy Whitcroft 	kfree(resv_map);
95484afd99bSAndy Whitcroft }
95584afd99bSAndy Whitcroft 
9564e35f483SJoonsoo Kim static inline struct resv_map *inode_resv_map(struct inode *inode)
9574e35f483SJoonsoo Kim {
958f27a5136SMike Kravetz 	/*
959f27a5136SMike Kravetz 	 * At inode evict time, i_mapping may not point to the original
960f27a5136SMike Kravetz 	 * address space within the inode.  This original address space
961f27a5136SMike Kravetz 	 * contains the pointer to the resv_map.  So, always use the
962f27a5136SMike Kravetz 	 * address space embedded within the inode.
963f27a5136SMike Kravetz 	 * The VERY common case is inode->mapping == &inode->i_data but,
964f27a5136SMike Kravetz 	 * this may not be true for device special inodes.
965f27a5136SMike Kravetz 	 */
966f27a5136SMike Kravetz 	return (struct resv_map *)(&inode->i_data)->private_data;
9674e35f483SJoonsoo Kim }
9684e35f483SJoonsoo Kim 
96984afd99bSAndy Whitcroft static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
970a1e78772SMel Gorman {
97181d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
9724e35f483SJoonsoo Kim 	if (vma->vm_flags & VM_MAYSHARE) {
9734e35f483SJoonsoo Kim 		struct address_space *mapping = vma->vm_file->f_mapping;
9744e35f483SJoonsoo Kim 		struct inode *inode = mapping->host;
9754e35f483SJoonsoo Kim 
9764e35f483SJoonsoo Kim 		return inode_resv_map(inode);
9774e35f483SJoonsoo Kim 
9784e35f483SJoonsoo Kim 	} else {
97984afd99bSAndy Whitcroft 		return (struct resv_map *)(get_vma_private_data(vma) &
98084afd99bSAndy Whitcroft 							~HPAGE_RESV_MASK);
9814e35f483SJoonsoo Kim 	}
982a1e78772SMel Gorman }
983a1e78772SMel Gorman 
98484afd99bSAndy Whitcroft static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
985a1e78772SMel Gorman {
98681d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
98781d1b09cSSasha Levin 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
988a1e78772SMel Gorman 
98984afd99bSAndy Whitcroft 	set_vma_private_data(vma, (get_vma_private_data(vma) &
99084afd99bSAndy Whitcroft 				HPAGE_RESV_MASK) | (unsigned long)map);
99104f2cbe3SMel Gorman }
99204f2cbe3SMel Gorman 
99304f2cbe3SMel Gorman static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
99404f2cbe3SMel Gorman {
99581d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
99681d1b09cSSasha Levin 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
997e7c4b0bfSAndy Whitcroft 
998e7c4b0bfSAndy Whitcroft 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
99904f2cbe3SMel Gorman }
100004f2cbe3SMel Gorman 
100104f2cbe3SMel Gorman static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
100204f2cbe3SMel Gorman {
100381d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1004e7c4b0bfSAndy Whitcroft 
1005e7c4b0bfSAndy Whitcroft 	return (get_vma_private_data(vma) & flag) != 0;
1006a1e78772SMel Gorman }
1007a1e78772SMel Gorman 
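/*
 * Encoding example (illustrative): because a resv_map is at least
 * word-aligned, its low bits are free for flags.  If the map sits at, say,
 * 0xffff888012345600, a MAP_PRIVATE owner vma stores
 *	vm_private_data = (void *)(0xffff888012345600 | HPAGE_RESV_OWNER)
 * and vma_resv_map() / is_vma_resv_set() above recover the pointer (by
 * clearing the HPAGE_RESV_MASK bits) and the flag, respectively.
 */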
100804f2cbe3SMel Gorman /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
1009a1e78772SMel Gorman void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
1010a1e78772SMel Gorman {
101181d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1012f83a275dSMel Gorman 	if (!(vma->vm_flags & VM_MAYSHARE))
1013a1e78772SMel Gorman 		vma->vm_private_data = (void *)0;
1014a1e78772SMel Gorman }
1015a1e78772SMel Gorman 
1016550a7d60SMina Almasry /*
1017550a7d60SMina Almasry  * Reset and decrement one ref on hugepage private reservation.
1018550a7d60SMina Almasry  * Called with mm->mmap_sem writer semaphore held.
1019550a7d60SMina Almasry  * This function should be only used by move_vma() and operate on
1020550a7d60SMina Almasry  * same sized vma. It should never come here with last ref on the
1021550a7d60SMina Almasry  * reservation.
1022550a7d60SMina Almasry  */
1023550a7d60SMina Almasry void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
1024550a7d60SMina Almasry {
1025550a7d60SMina Almasry 	/*
1026550a7d60SMina Almasry 	 * Clear the old hugetlb private page reservation.
1027550a7d60SMina Almasry 	 * It has already been transferred to new_vma.
1028550a7d60SMina Almasry 	 *
1029550a7d60SMina Almasry 	 * During a mremap() operation of a hugetlb vma we call move_vma()
1030550a7d60SMina Almasry 	 * which copies vma into new_vma and unmaps vma. After the copy
1031550a7d60SMina Almasry 	 * operation both new_vma and vma share a reference to the resv_map
1032550a7d60SMina Almasry 	 * struct, and at that point vma is about to be unmapped. We don't
1033550a7d60SMina Almasry 	 * want to return the reservation to the pool at unmap of vma because
1034550a7d60SMina Almasry 	 * the reservation still lives on in new_vma, so simply decrement the
1035550a7d60SMina Almasry 	 * ref here and remove the resv_map reference from this vma.
1036550a7d60SMina Almasry 	 */
1037550a7d60SMina Almasry 	struct resv_map *reservations = vma_resv_map(vma);
1038550a7d60SMina Almasry 
1039afe041c2SBui Quang Minh 	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1040afe041c2SBui Quang Minh 		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
1041550a7d60SMina Almasry 		kref_put(&reservations->refs, resv_map_release);
1042afe041c2SBui Quang Minh 	}
1043550a7d60SMina Almasry 
1044550a7d60SMina Almasry 	reset_vma_resv_huge_pages(vma);
1045550a7d60SMina Almasry }
1046550a7d60SMina Almasry 
1047a1e78772SMel Gorman /* Returns true if the VMA has associated reserve pages */
1048559ec2f8SNicholas Krause static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
1049a1e78772SMel Gorman {
1050af0ed73eSJoonsoo Kim 	if (vma->vm_flags & VM_NORESERVE) {
1051af0ed73eSJoonsoo Kim 		/*
1052af0ed73eSJoonsoo Kim 		 * This address is already reserved by another process (chg == 0),
1053af0ed73eSJoonsoo Kim 		 * so we should decrement the reserved count. Without decrementing,
1054af0ed73eSJoonsoo Kim 		 * the reserve count remains after the inode is released, because
1055af0ed73eSJoonsoo Kim 		 * this allocated page will go into the page cache and be regarded
1056af0ed73eSJoonsoo Kim 		 * as coming from the reserved pool in the release step.  Currently
1057af0ed73eSJoonsoo Kim 		 * we don't have any better way to deal with this situation, so add
1058af0ed73eSJoonsoo Kim 		 * a work-around here.
1059af0ed73eSJoonsoo Kim 		 */
1060af0ed73eSJoonsoo Kim 		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
1061559ec2f8SNicholas Krause 			return true;
1062af0ed73eSJoonsoo Kim 		else
1063559ec2f8SNicholas Krause 			return false;
1064af0ed73eSJoonsoo Kim 	}
1065a63884e9SJoonsoo Kim 
1066a63884e9SJoonsoo Kim 	/* Shared mappings always use reserves */
10671fb1b0e9SMike Kravetz 	if (vma->vm_flags & VM_MAYSHARE) {
10681fb1b0e9SMike Kravetz 		/*
10691fb1b0e9SMike Kravetz 		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
10701fb1b0e9SMike Kravetz 		 * be a region map for all pages.  The only situation where
10711fb1b0e9SMike Kravetz 		 * there is no region map is if a hole was punched via
10727c8de358SEthon Paul 		 * fallocate.  In this case, there really are no reserves to
10731fb1b0e9SMike Kravetz 		 * use.  This situation is indicated if chg != 0.
10741fb1b0e9SMike Kravetz 		 */
10751fb1b0e9SMike Kravetz 		if (chg)
10761fb1b0e9SMike Kravetz 			return false;
10771fb1b0e9SMike Kravetz 		else
1078559ec2f8SNicholas Krause 			return true;
10791fb1b0e9SMike Kravetz 	}
1080a63884e9SJoonsoo Kim 
1081a63884e9SJoonsoo Kim 	/*
1082a63884e9SJoonsoo Kim 	 * Only the process that called mmap() has reserves for
1083a63884e9SJoonsoo Kim 	 * private mappings.
1084a63884e9SJoonsoo Kim 	 */
108567961f9dSMike Kravetz 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
108667961f9dSMike Kravetz 		/*
108767961f9dSMike Kravetz 		 * Like the shared case above, a hole punch or truncate
108867961f9dSMike Kravetz 		 * could have been performed on the private mapping.
108967961f9dSMike Kravetz 		 * Examine the value of chg to determine if reserves
109067961f9dSMike Kravetz 		 * actually exist or were previously consumed.
109167961f9dSMike Kravetz 		 * Very Subtle - The value of chg comes from a previous
109267961f9dSMike Kravetz 		 * call to vma_needs_reserves().  The reserve map for
109367961f9dSMike Kravetz 		 * private mappings has different (opposite) semantics
109467961f9dSMike Kravetz 		 * than that of shared mappings.  vma_needs_reserves()
109567961f9dSMike Kravetz 		 * has already taken this difference in semantics into
109667961f9dSMike Kravetz 		 * account.  Therefore, the meaning of chg is the same
109767961f9dSMike Kravetz 		 * as in the shared case above.  Code could easily be
109867961f9dSMike Kravetz 		 * combined, but keeping it separate draws attention to
109967961f9dSMike Kravetz 		 * subtle differences.
110067961f9dSMike Kravetz 		 */
110167961f9dSMike Kravetz 		if (chg)
110267961f9dSMike Kravetz 			return false;
110367961f9dSMike Kravetz 		else
1104559ec2f8SNicholas Krause 			return true;
110567961f9dSMike Kravetz 	}
1106a63884e9SJoonsoo Kim 
1107559ec2f8SNicholas Krause 	return false;
1108a1e78772SMel Gorman }
1109a1e78772SMel Gorman 
1110a5516438SAndi Kleen static void enqueue_huge_page(struct hstate *h, struct page *page)
11111da177e4SLinus Torvalds {
11121da177e4SLinus Torvalds 	int nid = page_to_nid(page);
11139487ca60SMike Kravetz 
11149487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
1115b65a4edaSMike Kravetz 	VM_BUG_ON_PAGE(page_count(page), page);
1116b65a4edaSMike Kravetz 
11170edaecfaSAneesh Kumar K.V 	list_move(&page->lru, &h->hugepage_freelists[nid]);
1118a5516438SAndi Kleen 	h->free_huge_pages++;
1119a5516438SAndi Kleen 	h->free_huge_pages_node[nid]++;
11206c037149SMike Kravetz 	SetHPageFreed(page);
11211da177e4SLinus Torvalds }
11221da177e4SLinus Torvalds 
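/*
 * Dequeue a free huge page from the given node's free list.  Pages that
 * are hwpoisoned, or that are not long-term pinnable when the caller is
 * in a PF_MEMALLOC_PIN context, are skipped.  Returns the page moved to
 * the active list with a reference taken, or NULL if none is available.
 * Called with hugetlb_lock held.
 */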
112394310cbcSAnshuman Khandual static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
1124bf50bab2SNaoya Horiguchi {
1125bf50bab2SNaoya Horiguchi 	struct page *page;
11261a08ae36SPavel Tatashin 	bool pin = !!(current->flags & PF_MEMALLOC_PIN);
1127bf50bab2SNaoya Horiguchi 
11289487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
1129bbe88753SJoonsoo Kim 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
11306077c943SAlex Sierra 		if (pin && !is_longterm_pinnable_page(page))
1131bbe88753SJoonsoo Kim 			continue;
1132bbe88753SJoonsoo Kim 
11336664bfc8SWei Yang 		if (PageHWPoison(page))
11346664bfc8SWei Yang 			continue;
1135bbe88753SJoonsoo Kim 
11360edaecfaSAneesh Kumar K.V 		list_move(&page->lru, &h->hugepage_activelist);
1137a9869b83SNaoya Horiguchi 		set_page_refcounted(page);
11386c037149SMike Kravetz 		ClearHPageFreed(page);
1139bf50bab2SNaoya Horiguchi 		h->free_huge_pages--;
1140bf50bab2SNaoya Horiguchi 		h->free_huge_pages_node[nid]--;
1141bf50bab2SNaoya Horiguchi 		return page;
1142bf50bab2SNaoya Horiguchi 	}
1143bf50bab2SNaoya Horiguchi 
11446664bfc8SWei Yang 	return NULL;
11456664bfc8SWei Yang }
11466664bfc8SWei Yang 
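/*
 * Walk the zonelist for @nid/@gfp_mask, restricted to @nmask and the
 * current cpuset, and try dequeue_huge_page_node_exact() once per
 * distinct node.  Retries if the cpuset mems_allowed changed during
 * the walk.
 */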
11473e59fcb0SMichal Hocko static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
11483e59fcb0SMichal Hocko 		nodemask_t *nmask)
114994310cbcSAnshuman Khandual {
11503e59fcb0SMichal Hocko 	unsigned int cpuset_mems_cookie;
11513e59fcb0SMichal Hocko 	struct zonelist *zonelist;
11523e59fcb0SMichal Hocko 	struct zone *zone;
11533e59fcb0SMichal Hocko 	struct zoneref *z;
115498fa15f3SAnshuman Khandual 	int node = NUMA_NO_NODE;
11553e59fcb0SMichal Hocko 
11563e59fcb0SMichal Hocko 	zonelist = node_zonelist(nid, gfp_mask);
11573e59fcb0SMichal Hocko 
11583e59fcb0SMichal Hocko retry_cpuset:
11593e59fcb0SMichal Hocko 	cpuset_mems_cookie = read_mems_allowed_begin();
11603e59fcb0SMichal Hocko 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
116194310cbcSAnshuman Khandual 		struct page *page;
116294310cbcSAnshuman Khandual 
11633e59fcb0SMichal Hocko 		if (!cpuset_zone_allowed(zone, gfp_mask))
11643e59fcb0SMichal Hocko 			continue;
11653e59fcb0SMichal Hocko 		/*
11663e59fcb0SMichal Hocko 		 * no need to ask again on the same node. Pool is node rather than
11673e59fcb0SMichal Hocko 		 * zone aware
11683e59fcb0SMichal Hocko 		 */
11693e59fcb0SMichal Hocko 		if (zone_to_nid(zone) == node)
11703e59fcb0SMichal Hocko 			continue;
11713e59fcb0SMichal Hocko 		node = zone_to_nid(zone);
117294310cbcSAnshuman Khandual 
117394310cbcSAnshuman Khandual 		page = dequeue_huge_page_node_exact(h, node);
117494310cbcSAnshuman Khandual 		if (page)
117594310cbcSAnshuman Khandual 			return page;
117694310cbcSAnshuman Khandual 	}
11773e59fcb0SMichal Hocko 	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
11783e59fcb0SMichal Hocko 		goto retry_cpuset;
11793e59fcb0SMichal Hocko 
118094310cbcSAnshuman Khandual 	return NULL;
118194310cbcSAnshuman Khandual }
118294310cbcSAnshuman Khandual 
1183a5516438SAndi Kleen static struct page *dequeue_huge_page_vma(struct hstate *h,
1184a5516438SAndi Kleen 				struct vm_area_struct *vma,
1185af0ed73eSJoonsoo Kim 				unsigned long address, int avoid_reserve,
1186af0ed73eSJoonsoo Kim 				long chg)
11871da177e4SLinus Torvalds {
1188cfcaa66fSBen Widawsky 	struct page *page = NULL;
1189480eccf9SLee Schermerhorn 	struct mempolicy *mpol;
119004ec6264SVlastimil Babka 	gfp_t gfp_mask;
11913e59fcb0SMichal Hocko 	nodemask_t *nodemask;
119204ec6264SVlastimil Babka 	int nid;
11931da177e4SLinus Torvalds 
1194a1e78772SMel Gorman 	/*
1195a1e78772SMel Gorman 	 * A child process with MAP_PRIVATE mappings created by its parent
1196a1e78772SMel Gorman 	 * has no page reserves. This check ensures that reservations are
1197a1e78772SMel Gorman 	 * not "stolen". The child may still get SIGKILLed.
1198a1e78772SMel Gorman 	 */
1199af0ed73eSJoonsoo Kim 	if (!vma_has_reserves(vma, chg) &&
1200a5516438SAndi Kleen 			h->free_huge_pages - h->resv_huge_pages == 0)
1201c0ff7453SMiao Xie 		goto err;
1202a1e78772SMel Gorman 
120304f2cbe3SMel Gorman 	/* If reserves cannot be used, ensure enough pages are in the pool */
1204a5516438SAndi Kleen 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
12056eab04a8SJustin P. Mattock 		goto err;
120604f2cbe3SMel Gorman 
120704ec6264SVlastimil Babka 	gfp_mask = htlb_alloc_mask(h);
120804ec6264SVlastimil Babka 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1209cfcaa66fSBen Widawsky 
1210cfcaa66fSBen Widawsky 	if (mpol_is_preferred_many(mpol)) {
12113e59fcb0SMichal Hocko 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1212cfcaa66fSBen Widawsky 
1213cfcaa66fSBen Widawsky 		/* Fallback to all nodes if page==NULL */
1214cfcaa66fSBen Widawsky 		nodemask = NULL;
1215cfcaa66fSBen Widawsky 	}
1216cfcaa66fSBen Widawsky 
1217cfcaa66fSBen Widawsky 	if (!page)
1218cfcaa66fSBen Widawsky 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1219cfcaa66fSBen Widawsky 
12203e59fcb0SMichal Hocko 	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
1221d6995da3SMike Kravetz 		SetHPageRestoreReserve(page);
1222a63884e9SJoonsoo Kim 		h->resv_huge_pages--;
1223bf50bab2SNaoya Horiguchi 	}
1224cc9a6c87SMel Gorman 
1225cc9a6c87SMel Gorman 	mpol_cond_put(mpol);
1226cc9a6c87SMel Gorman 	return page;
1227cc9a6c87SMel Gorman 
1228c0ff7453SMiao Xie err:
1229cc9a6c87SMel Gorman 	return NULL;
12301da177e4SLinus Torvalds }
12311da177e4SLinus Torvalds 
12321cac6f2cSLuiz Capitulino /*
12331cac6f2cSLuiz Capitulino  * common helper functions for hstate_next_node_to_{alloc|free}.
12341cac6f2cSLuiz Capitulino  * We may have allocated or freed a huge page based on a different
12351cac6f2cSLuiz Capitulino  * nodes_allowed previously, so h->next_nid_to_{alloc|free} might
12361cac6f2cSLuiz Capitulino  * be outside of *nodes_allowed.  Ensure that we use an allowed
12371cac6f2cSLuiz Capitulino  * node for alloc or free.
12381cac6f2cSLuiz Capitulino  */
12391cac6f2cSLuiz Capitulino static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
12401cac6f2cSLuiz Capitulino {
12410edaf86cSAndrew Morton 	nid = next_node_in(nid, *nodes_allowed);
12421cac6f2cSLuiz Capitulino 	VM_BUG_ON(nid >= MAX_NUMNODES);
12431cac6f2cSLuiz Capitulino 
12441cac6f2cSLuiz Capitulino 	return nid;
12451cac6f2cSLuiz Capitulino }
12461cac6f2cSLuiz Capitulino 
12471cac6f2cSLuiz Capitulino static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
12481cac6f2cSLuiz Capitulino {
12491cac6f2cSLuiz Capitulino 	if (!node_isset(nid, *nodes_allowed))
12501cac6f2cSLuiz Capitulino 		nid = next_node_allowed(nid, nodes_allowed);
12511cac6f2cSLuiz Capitulino 	return nid;
12521cac6f2cSLuiz Capitulino }
12531cac6f2cSLuiz Capitulino 
12541cac6f2cSLuiz Capitulino /*
12551cac6f2cSLuiz Capitulino  * returns the previously saved node ["this node"] from which to
12561cac6f2cSLuiz Capitulino  * allocate a persistent huge page for the pool and advance the
12571cac6f2cSLuiz Capitulino  * next node from which to allocate, handling wrap at end of node
12581cac6f2cSLuiz Capitulino  * mask.
12591cac6f2cSLuiz Capitulino  */
12601cac6f2cSLuiz Capitulino static int hstate_next_node_to_alloc(struct hstate *h,
12611cac6f2cSLuiz Capitulino 					nodemask_t *nodes_allowed)
12621cac6f2cSLuiz Capitulino {
12631cac6f2cSLuiz Capitulino 	int nid;
12641cac6f2cSLuiz Capitulino 
12651cac6f2cSLuiz Capitulino 	VM_BUG_ON(!nodes_allowed);
12661cac6f2cSLuiz Capitulino 
12671cac6f2cSLuiz Capitulino 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
12681cac6f2cSLuiz Capitulino 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
12691cac6f2cSLuiz Capitulino 
12701cac6f2cSLuiz Capitulino 	return nid;
12711cac6f2cSLuiz Capitulino }
12721cac6f2cSLuiz Capitulino 
12731cac6f2cSLuiz Capitulino /*
127410c6ec49SMike Kravetz  * helper for remove_pool_huge_page() - return the previously saved
12751cac6f2cSLuiz Capitulino  * node ["this node"] from which to free a huge page.  Advance the
12761cac6f2cSLuiz Capitulino  * next node id whether or not we find a free huge page to free so
12771cac6f2cSLuiz Capitulino  * that the next attempt to free addresses the next node.
12781cac6f2cSLuiz Capitulino  */
12791cac6f2cSLuiz Capitulino static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
12801cac6f2cSLuiz Capitulino {
12811cac6f2cSLuiz Capitulino 	int nid;
12821cac6f2cSLuiz Capitulino 
12831cac6f2cSLuiz Capitulino 	VM_BUG_ON(!nodes_allowed);
12841cac6f2cSLuiz Capitulino 
12851cac6f2cSLuiz Capitulino 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
12861cac6f2cSLuiz Capitulino 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
12871cac6f2cSLuiz Capitulino 
12881cac6f2cSLuiz Capitulino 	return nid;
12891cac6f2cSLuiz Capitulino }
12901cac6f2cSLuiz Capitulino 
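/*
 * Iterate over each node in @mask at most once, starting from the
 * hstate's saved next_nid_to_{alloc|free} and advancing it on every
 * iteration so that pool growth and shrinking stay interleaved across
 * the allowed nodes.  See alloc_pool_huge_page() and
 * remove_pool_huge_page() for typical usage.
 */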
12911cac6f2cSLuiz Capitulino #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
12921cac6f2cSLuiz Capitulino 	for (nr_nodes = nodes_weight(*mask);				\
12931cac6f2cSLuiz Capitulino 		nr_nodes > 0 &&						\
12941cac6f2cSLuiz Capitulino 		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
12951cac6f2cSLuiz Capitulino 		nr_nodes--)
12961cac6f2cSLuiz Capitulino 
12971cac6f2cSLuiz Capitulino #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
12981cac6f2cSLuiz Capitulino 	for (nr_nodes = nodes_weight(*mask);				\
12991cac6f2cSLuiz Capitulino 		nr_nodes > 0 &&						\
13001cac6f2cSLuiz Capitulino 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
13011cac6f2cSLuiz Capitulino 		nr_nodes--)
13021cac6f2cSLuiz Capitulino 
13038531fc6fSMike Kravetz /* used to demote non-gigantic huge pages as well */
130434d9e35bSMike Kravetz static void __destroy_compound_gigantic_page(struct page *page,
130534d9e35bSMike Kravetz 					unsigned int order, bool demote)
1306944d9fecSLuiz Capitulino {
1307944d9fecSLuiz Capitulino 	int i;
1308944d9fecSLuiz Capitulino 	int nr_pages = 1 << order;
1309*14455eabSCheng Li 	struct page *p;
1310944d9fecSLuiz Capitulino 
1311c8cc708aSGerald Schaefer 	atomic_set(compound_mapcount_ptr(page), 0);
131247e29d32SJohn Hubbard 	atomic_set(compound_pincount_ptr(page), 0);
131347e29d32SJohn Hubbard 
1314*14455eabSCheng Li 	for (i = 1; i < nr_pages; i++) {
1315*14455eabSCheng Li 		p = nth_page(page, i);
1316a01f4390SMike Kravetz 		p->mapping = NULL;
13171d798ca3SKirill A. Shutemov 		clear_compound_head(p);
131834d9e35bSMike Kravetz 		if (!demote)
1319944d9fecSLuiz Capitulino 			set_page_refcounted(p);
1320944d9fecSLuiz Capitulino 	}
1321944d9fecSLuiz Capitulino 
1322944d9fecSLuiz Capitulino 	set_compound_order(page, 0);
13235232c63fSMatthew Wilcox (Oracle) #ifdef CONFIG_64BIT
1324ba9c1201SGerald Schaefer 	page[1].compound_nr = 0;
13255232c63fSMatthew Wilcox (Oracle) #endif
1326944d9fecSLuiz Capitulino 	__ClearPageHead(page);
1327944d9fecSLuiz Capitulino }
1328944d9fecSLuiz Capitulino 
13298531fc6fSMike Kravetz static void destroy_compound_hugetlb_page_for_demote(struct page *page,
13308531fc6fSMike Kravetz 					unsigned int order)
13318531fc6fSMike Kravetz {
13328531fc6fSMike Kravetz 	__destroy_compound_gigantic_page(page, order, true);
13338531fc6fSMike Kravetz }
13348531fc6fSMike Kravetz 
13358531fc6fSMike Kravetz #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
133634d9e35bSMike Kravetz static void destroy_compound_gigantic_page(struct page *page,
133734d9e35bSMike Kravetz 					unsigned int order)
133834d9e35bSMike Kravetz {
133934d9e35bSMike Kravetz 	__destroy_compound_gigantic_page(page, order, false);
134034d9e35bSMike Kravetz }
134134d9e35bSMike Kravetz 
1342d00181b9SKirill A. Shutemov static void free_gigantic_page(struct page *page, unsigned int order)
1343944d9fecSLuiz Capitulino {
1344cf11e85fSRoman Gushchin 	/*
1345cf11e85fSRoman Gushchin 	 * If the page isn't allocated using the cma allocator,
1346cf11e85fSRoman Gushchin 	 * cma_release() returns false.
1347cf11e85fSRoman Gushchin 	 */
1348dbda8feaSBarry Song #ifdef CONFIG_CMA
1349dbda8feaSBarry Song 	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
1350cf11e85fSRoman Gushchin 		return;
1351dbda8feaSBarry Song #endif
1352cf11e85fSRoman Gushchin 
1353944d9fecSLuiz Capitulino 	free_contig_range(page_to_pfn(page), 1 << order);
1354944d9fecSLuiz Capitulino }
1355944d9fecSLuiz Capitulino 
13564eb0716eSAlexandre Ghiti #ifdef CONFIG_CONTIG_ALLOC
1357d9cc948fSMichal Hocko static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1358d9cc948fSMichal Hocko 		int nid, nodemask_t *nodemask)
1359944d9fecSLuiz Capitulino {
136004adbc3fSMiaohe Lin 	unsigned long nr_pages = pages_per_huge_page(h);
1361953f064aSLi Xinhai 	if (nid == NUMA_NO_NODE)
1362953f064aSLi Xinhai 		nid = numa_mem_id();
1363944d9fecSLuiz Capitulino 
1364dbda8feaSBarry Song #ifdef CONFIG_CMA
1365dbda8feaSBarry Song 	{
1366cf11e85fSRoman Gushchin 		struct page *page;
1367cf11e85fSRoman Gushchin 		int node;
1368cf11e85fSRoman Gushchin 
1369953f064aSLi Xinhai 		if (hugetlb_cma[nid]) {
1370953f064aSLi Xinhai 			page = cma_alloc(hugetlb_cma[nid], nr_pages,
1371953f064aSLi Xinhai 					huge_page_order(h), true);
1372953f064aSLi Xinhai 			if (page)
1373953f064aSLi Xinhai 				return page;
1374953f064aSLi Xinhai 		}
1375953f064aSLi Xinhai 
1376953f064aSLi Xinhai 		if (!(gfp_mask & __GFP_THISNODE)) {
1377cf11e85fSRoman Gushchin 			for_each_node_mask(node, *nodemask) {
1378953f064aSLi Xinhai 				if (node == nid || !hugetlb_cma[node])
1379cf11e85fSRoman Gushchin 					continue;
1380cf11e85fSRoman Gushchin 
1381cf11e85fSRoman Gushchin 				page = cma_alloc(hugetlb_cma[node], nr_pages,
1382cf11e85fSRoman Gushchin 						huge_page_order(h), true);
1383cf11e85fSRoman Gushchin 				if (page)
1384cf11e85fSRoman Gushchin 					return page;
1385cf11e85fSRoman Gushchin 			}
1386cf11e85fSRoman Gushchin 		}
1387953f064aSLi Xinhai 	}
1388dbda8feaSBarry Song #endif
1389cf11e85fSRoman Gushchin 
13905e27a2dfSAnshuman Khandual 	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
1391944d9fecSLuiz Capitulino }
1392944d9fecSLuiz Capitulino 
13934eb0716eSAlexandre Ghiti #else /* !CONFIG_CONTIG_ALLOC */
13944eb0716eSAlexandre Ghiti static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
13954eb0716eSAlexandre Ghiti 					int nid, nodemask_t *nodemask)
13964eb0716eSAlexandre Ghiti {
13974eb0716eSAlexandre Ghiti 	return NULL;
13984eb0716eSAlexandre Ghiti }
13994eb0716eSAlexandre Ghiti #endif /* CONFIG_CONTIG_ALLOC */
1400944d9fecSLuiz Capitulino 
1401e1073d1eSAneesh Kumar K.V #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1402d9cc948fSMichal Hocko static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
14034eb0716eSAlexandre Ghiti 					int nid, nodemask_t *nodemask)
14044eb0716eSAlexandre Ghiti {
14054eb0716eSAlexandre Ghiti 	return NULL;
14064eb0716eSAlexandre Ghiti }
1407d00181b9SKirill A. Shutemov static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1408944d9fecSLuiz Capitulino static inline void destroy_compound_gigantic_page(struct page *page,
1409d00181b9SKirill A. Shutemov 						unsigned int order) { }
1410944d9fecSLuiz Capitulino #endif
1411944d9fecSLuiz Capitulino 
14126eb4e88aSMike Kravetz /*
14136eb4e88aSMike Kravetz  * Remove hugetlb page from lists, and update dtor so that page appears
141434d9e35bSMike Kravetz  * as just a compound page.
141534d9e35bSMike Kravetz  *
141634d9e35bSMike Kravetz  * A reference is held on the page, except in the case of demote.
14176eb4e88aSMike Kravetz  *
14186eb4e88aSMike Kravetz  * Must be called with hugetlb lock held.
14196eb4e88aSMike Kravetz  */
142034d9e35bSMike Kravetz static void __remove_hugetlb_page(struct hstate *h, struct page *page,
142134d9e35bSMike Kravetz 							bool adjust_surplus,
142234d9e35bSMike Kravetz 							bool demote)
14236eb4e88aSMike Kravetz {
14246eb4e88aSMike Kravetz 	int nid = page_to_nid(page);
14256eb4e88aSMike Kravetz 
14266eb4e88aSMike Kravetz 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
14276eb4e88aSMike Kravetz 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
14286eb4e88aSMike Kravetz 
14299487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
14306eb4e88aSMike Kravetz 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
14316eb4e88aSMike Kravetz 		return;
14326eb4e88aSMike Kravetz 
14336eb4e88aSMike Kravetz 	list_del(&page->lru);
14346eb4e88aSMike Kravetz 
14356eb4e88aSMike Kravetz 	if (HPageFreed(page)) {
14366eb4e88aSMike Kravetz 		h->free_huge_pages--;
14376eb4e88aSMike Kravetz 		h->free_huge_pages_node[nid]--;
14386eb4e88aSMike Kravetz 	}
14396eb4e88aSMike Kravetz 	if (adjust_surplus) {
14406eb4e88aSMike Kravetz 		h->surplus_huge_pages--;
14416eb4e88aSMike Kravetz 		h->surplus_huge_pages_node[nid]--;
14426eb4e88aSMike Kravetz 	}
14436eb4e88aSMike Kravetz 
1444e32d20c0SMike Kravetz 	/*
1445e32d20c0SMike Kravetz 	 * Very subtle
1446e32d20c0SMike Kravetz 	 *
1447e32d20c0SMike Kravetz 	 * For non-gigantic pages set the destructor to the normal compound
1448e32d20c0SMike Kravetz 	 * page dtor.  This is needed in case someone takes an additional
1449e32d20c0SMike Kravetz 	 * temporary ref to the page, and freeing is delayed until they drop
1450e32d20c0SMike Kravetz 	 * their reference.
1451e32d20c0SMike Kravetz 	 *
1452e32d20c0SMike Kravetz 	 * For gigantic pages set the destructor to the null dtor.  This
1453e32d20c0SMike Kravetz 	 * destructor will never be called.  Before freeing the gigantic
1454e32d20c0SMike Kravetz 	 * page destroy_compound_gigantic_page will turn the compound page
1455e32d20c0SMike Kravetz 	 * into a simple group of pages.  After this the destructor does not
1456e32d20c0SMike Kravetz 	 * apply.
1457e32d20c0SMike Kravetz 	 *
1458e32d20c0SMike Kravetz 	 * This handles the case where more than one ref is held when and
1459e32d20c0SMike Kravetz 	 * after update_and_free_page is called.
146034d9e35bSMike Kravetz 	 *
146134d9e35bSMike Kravetz 	 * In the case of demote we do not ref count the page as it will soon
146234d9e35bSMike Kravetz 	 * be turned into a page of smaller size.
1463e32d20c0SMike Kravetz 	 */
146434d9e35bSMike Kravetz 	if (!demote)
14656eb4e88aSMike Kravetz 		set_page_refcounted(page);
1466e32d20c0SMike Kravetz 	if (hstate_is_gigantic(h))
14676eb4e88aSMike Kravetz 		set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1468e32d20c0SMike Kravetz 	else
1469e32d20c0SMike Kravetz 		set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
14706eb4e88aSMike Kravetz 
14716eb4e88aSMike Kravetz 	h->nr_huge_pages--;
14726eb4e88aSMike Kravetz 	h->nr_huge_pages_node[nid]--;
14736eb4e88aSMike Kravetz }
14746eb4e88aSMike Kravetz 
147534d9e35bSMike Kravetz static void remove_hugetlb_page(struct hstate *h, struct page *page,
147634d9e35bSMike Kravetz 							bool adjust_surplus)
147734d9e35bSMike Kravetz {
147834d9e35bSMike Kravetz 	__remove_hugetlb_page(h, page, adjust_surplus, false);
147934d9e35bSMike Kravetz }
148034d9e35bSMike Kravetz 
14818531fc6fSMike Kravetz static void remove_hugetlb_page_for_demote(struct hstate *h, struct page *page,
14828531fc6fSMike Kravetz 							bool adjust_surplus)
14838531fc6fSMike Kravetz {
14848531fc6fSMike Kravetz 	__remove_hugetlb_page(h, page, adjust_surplus, true);
14858531fc6fSMike Kravetz }
14868531fc6fSMike Kravetz 
1487ad2fa371SMuchun Song static void add_hugetlb_page(struct hstate *h, struct page *page,
1488ad2fa371SMuchun Song 			     bool adjust_surplus)
1489ad2fa371SMuchun Song {
1490ad2fa371SMuchun Song 	int zeroed;
1491ad2fa371SMuchun Song 	int nid = page_to_nid(page);
1492ad2fa371SMuchun Song 
1493ad2fa371SMuchun Song 	VM_BUG_ON_PAGE(!HPageVmemmapOptimized(page), page);
1494ad2fa371SMuchun Song 
1495ad2fa371SMuchun Song 	lockdep_assert_held(&hugetlb_lock);
1496ad2fa371SMuchun Song 
1497ad2fa371SMuchun Song 	INIT_LIST_HEAD(&page->lru);
1498ad2fa371SMuchun Song 	h->nr_huge_pages++;
1499ad2fa371SMuchun Song 	h->nr_huge_pages_node[nid]++;
1500ad2fa371SMuchun Song 
1501ad2fa371SMuchun Song 	if (adjust_surplus) {
1502ad2fa371SMuchun Song 		h->surplus_huge_pages++;
1503ad2fa371SMuchun Song 		h->surplus_huge_pages_node[nid]++;
1504ad2fa371SMuchun Song 	}
1505ad2fa371SMuchun Song 
1506ad2fa371SMuchun Song 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1507ad2fa371SMuchun Song 	set_page_private(page, 0);
1508a9e1eab2SMiaohe Lin 	/*
1509a9e1eab2SMiaohe Lin 	 * We have to set HPageVmemmapOptimized again because the
1510a9e1eab2SMiaohe Lin 	 * set_page_private(page, 0) call above cleared it.
1511a9e1eab2SMiaohe Lin 	 */
1512ad2fa371SMuchun Song 	SetHPageVmemmapOptimized(page);
1513ad2fa371SMuchun Song 
1514ad2fa371SMuchun Song 	/*
1515b65a4edaSMike Kravetz 	 * This page is about to be managed by the hugetlb allocator and
1516b65a4edaSMike Kravetz 	 * should have no users.  Drop our reference, and check for others
1517b65a4edaSMike Kravetz 	 * just in case.
1518ad2fa371SMuchun Song 	 */
1519ad2fa371SMuchun Song 	zeroed = put_page_testzero(page);
1520b65a4edaSMike Kravetz 	if (!zeroed)
1521b65a4edaSMike Kravetz 		/*
1522b65a4edaSMike Kravetz 		 * It is VERY unlikely someone else has taken a ref on
1523b65a4edaSMike Kravetz 		 * the page.  In this case, we simply return as the
1524b65a4edaSMike Kravetz 		 * hugetlb destructor (free_huge_page) will be called
1525b65a4edaSMike Kravetz 		 * when this other ref is dropped.
1526b65a4edaSMike Kravetz 		 */
1527b65a4edaSMike Kravetz 		return;
1528b65a4edaSMike Kravetz 
1529ad2fa371SMuchun Song 	arch_clear_hugepage_flags(page);
1530ad2fa371SMuchun Song 	enqueue_huge_page(h, page);
1531ad2fa371SMuchun Song }
1532ad2fa371SMuchun Song 
1533b65d4adbSMuchun Song static void __update_and_free_page(struct hstate *h, struct page *page)
15346af2acb6SAdam Litke {
15356af2acb6SAdam Litke 	int i;
1536*14455eabSCheng Li 	struct page *subpage;
1537a5516438SAndi Kleen 
15384eb0716eSAlexandre Ghiti 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1539944d9fecSLuiz Capitulino 		return;
154018229df5SAndy Whitcroft 
1541161df60eSNaoya Horiguchi 	/*
1542161df60eSNaoya Horiguchi 	 * If we don't know which subpages are hwpoisoned, we can't free
1543161df60eSNaoya Horiguchi 	 * the hugepage, so it's leaked intentionally.
1544161df60eSNaoya Horiguchi 	 */
1545161df60eSNaoya Horiguchi 	if (HPageRawHwpUnreliable(page))
1546161df60eSNaoya Horiguchi 		return;
1547161df60eSNaoya Horiguchi 
15486213834cSMuchun Song 	if (hugetlb_vmemmap_restore(h, page)) {
1549ad2fa371SMuchun Song 		spin_lock_irq(&hugetlb_lock);
1550ad2fa371SMuchun Song 		/*
1551ad2fa371SMuchun Song 		 * If we cannot allocate vmemmap pages, just refuse to free the
1552ad2fa371SMuchun Song 		 * page, put it back on the hugetlb free list, and treat it as a
1553ad2fa371SMuchun Song 		 * surplus page.
1554ad2fa371SMuchun Song 		 */
1555ad2fa371SMuchun Song 		add_hugetlb_page(h, page, true);
1556ad2fa371SMuchun Song 		spin_unlock_irq(&hugetlb_lock);
1557ad2fa371SMuchun Song 		return;
1558ad2fa371SMuchun Song 	}
1559ad2fa371SMuchun Song 
1560161df60eSNaoya Horiguchi 	/*
1561161df60eSNaoya Horiguchi 	 * Move PageHWPoison flag from head page to the raw error pages,
1562161df60eSNaoya Horiguchi 	 * which makes any healthy subpages reusable.
1563161df60eSNaoya Horiguchi 	 */
1564161df60eSNaoya Horiguchi 	if (unlikely(PageHWPoison(page)))
1565161df60eSNaoya Horiguchi 		hugetlb_clear_page_hwpoison(page);
1566161df60eSNaoya Horiguchi 
1567*14455eabSCheng Li 	for (i = 0; i < pages_per_huge_page(h); i++) {
1568*14455eabSCheng Li 		subpage = nth_page(page, i);
1569dbfee5aeSMike Kravetz 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
157032f84528SChris Forbes 				1 << PG_referenced | 1 << PG_dirty |
1571a7407a27SLuiz Capitulino 				1 << PG_active | 1 << PG_private |
1572a7407a27SLuiz Capitulino 				1 << PG_writeback);
15736af2acb6SAdam Litke 	}
1574a01f4390SMike Kravetz 
1575a01f4390SMike Kravetz 	/*
1576a01f4390SMike Kravetz 	 * Non-gigantic pages demoted from CMA allocated gigantic pages
1577a01f4390SMike Kravetz 	 * need to be given back to CMA in free_gigantic_page.
1578a01f4390SMike Kravetz 	 */
1579a01f4390SMike Kravetz 	if (hstate_is_gigantic(h) ||
1580a01f4390SMike Kravetz 	    hugetlb_cma_page(page, huge_page_order(h))) {
1581944d9fecSLuiz Capitulino 		destroy_compound_gigantic_page(page, huge_page_order(h));
1582944d9fecSLuiz Capitulino 		free_gigantic_page(page, huge_page_order(h));
1583944d9fecSLuiz Capitulino 	} else {
1584a5516438SAndi Kleen 		__free_pages(page, huge_page_order(h));
15856af2acb6SAdam Litke 	}
1586944d9fecSLuiz Capitulino }
15876af2acb6SAdam Litke 
1588b65d4adbSMuchun Song /*
1589b65d4adbSMuchun Song  * Since update_and_free_page() can be called under any context, we cannot
1590b65d4adbSMuchun Song  * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
1591b65d4adbSMuchun Song  * actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate
1592b65d4adbSMuchun Song  * the vmemmap pages.
1593b65d4adbSMuchun Song  *
1594b65d4adbSMuchun Song  * free_hpage_workfn() locklessly retrieves the linked list of pages to be
1595b65d4adbSMuchun Song  * freed and frees them one-by-one. As the page->mapping pointer is going
1596b65d4adbSMuchun Song  * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1597b65d4adbSMuchun Song  * structure of a lockless linked list of huge pages to be freed.
1598b65d4adbSMuchun Song  */
1599b65d4adbSMuchun Song static LLIST_HEAD(hpage_freelist);
1600b65d4adbSMuchun Song 
1601b65d4adbSMuchun Song static void free_hpage_workfn(struct work_struct *work)
1602b65d4adbSMuchun Song {
1603b65d4adbSMuchun Song 	struct llist_node *node;
1604b65d4adbSMuchun Song 
1605b65d4adbSMuchun Song 	node = llist_del_all(&hpage_freelist);
1606b65d4adbSMuchun Song 
1607b65d4adbSMuchun Song 	while (node) {
1608b65d4adbSMuchun Song 		struct page *page;
1609b65d4adbSMuchun Song 		struct hstate *h;
1610b65d4adbSMuchun Song 
1611b65d4adbSMuchun Song 		page = container_of((struct address_space **)node,
1612b65d4adbSMuchun Song 				     struct page, mapping);
1613b65d4adbSMuchun Song 		node = node->next;
1614b65d4adbSMuchun Song 		page->mapping = NULL;
1615b65d4adbSMuchun Song 		/*
1616b65d4adbSMuchun Song 		 * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
1617b65d4adbSMuchun Song 		 * is going to trigger because a previous call to
1618b65d4adbSMuchun Song 		 * remove_hugetlb_page() will set_compound_page_dtor(page,
1619b65d4adbSMuchun Song 		 * NULL_COMPOUND_DTOR), so do not use page_hstate() directly.
1620b65d4adbSMuchun Song 		 */
1621b65d4adbSMuchun Song 		h = size_to_hstate(page_size(page));
1622b65d4adbSMuchun Song 
1623b65d4adbSMuchun Song 		__update_and_free_page(h, page);
1624b65d4adbSMuchun Song 
1625b65d4adbSMuchun Song 		cond_resched();
1626b65d4adbSMuchun Song 	}
1627b65d4adbSMuchun Song }
1628b65d4adbSMuchun Song static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1629b65d4adbSMuchun Song 
1630b65d4adbSMuchun Song static inline void flush_free_hpage_work(struct hstate *h)
1631b65d4adbSMuchun Song {
16326213834cSMuchun Song 	if (hugetlb_vmemmap_optimizable(h))
1633b65d4adbSMuchun Song 		flush_work(&free_hpage_work);
1634b65d4adbSMuchun Song }
1635b65d4adbSMuchun Song 
1636b65d4adbSMuchun Song static void update_and_free_page(struct hstate *h, struct page *page,
1637b65d4adbSMuchun Song 				 bool atomic)
1638b65d4adbSMuchun Song {
1639ad2fa371SMuchun Song 	if (!HPageVmemmapOptimized(page) || !atomic) {
1640b65d4adbSMuchun Song 		__update_and_free_page(h, page);
1641b65d4adbSMuchun Song 		return;
1642b65d4adbSMuchun Song 	}
1643b65d4adbSMuchun Song 
1644b65d4adbSMuchun Song 	/*
1645b65d4adbSMuchun Song 	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1646b65d4adbSMuchun Song 	 *
1647b65d4adbSMuchun Song 	 * Only call schedule_work() if hpage_freelist is previously
1648b65d4adbSMuchun Song 	 * empty. Otherwise, schedule_work() had been called but the workfn
1649b65d4adbSMuchun Song 	 * hasn't retrieved the list yet.
1650b65d4adbSMuchun Song 	 */
1651b65d4adbSMuchun Song 	if (llist_add((struct llist_node *)&page->mapping, &hpage_freelist))
1652b65d4adbSMuchun Song 		schedule_work(&free_hpage_work);
1653b65d4adbSMuchun Song }
1654b65d4adbSMuchun Song 
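/* Free each huge page on @list via update_and_free_page(), synchronously. */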
165510c6ec49SMike Kravetz static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
165610c6ec49SMike Kravetz {
165710c6ec49SMike Kravetz 	struct page *page, *t_page;
165810c6ec49SMike Kravetz 
165910c6ec49SMike Kravetz 	list_for_each_entry_safe(page, t_page, list, lru) {
1660b65d4adbSMuchun Song 		update_and_free_page(h, page, false);
166110c6ec49SMike Kravetz 		cond_resched();
166210c6ec49SMike Kravetz 	}
166310c6ec49SMike Kravetz }
166410c6ec49SMike Kravetz 
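/* Return the hstate whose huge page size matches @size, or NULL if none. */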
1665e5ff2159SAndi Kleen struct hstate *size_to_hstate(unsigned long size)
1666e5ff2159SAndi Kleen {
1667e5ff2159SAndi Kleen 	struct hstate *h;
1668e5ff2159SAndi Kleen 
1669e5ff2159SAndi Kleen 	for_each_hstate(h) {
1670e5ff2159SAndi Kleen 		if (huge_page_size(h) == size)
1671e5ff2159SAndi Kleen 			return h;
1672e5ff2159SAndi Kleen 	}
1673e5ff2159SAndi Kleen 	return NULL;
1674e5ff2159SAndi Kleen }
1675e5ff2159SAndi Kleen 
1676db71ef79SMike Kravetz void free_huge_page(struct page *page)
167727a85ef1SDavid Gibson {
1678a5516438SAndi Kleen 	/*
1679a5516438SAndi Kleen 	 * Can't pass hstate in here because it is called from the
1680a5516438SAndi Kleen 	 * compound page destructor.
1681a5516438SAndi Kleen 	 */
1682e5ff2159SAndi Kleen 	struct hstate *h = page_hstate(page);
16837893d1d5SAdam Litke 	int nid = page_to_nid(page);
1684d6995da3SMike Kravetz 	struct hugepage_subpool *spool = hugetlb_page_subpool(page);
168507443a85SJoonsoo Kim 	bool restore_reserve;
1686db71ef79SMike Kravetz 	unsigned long flags;
168727a85ef1SDavid Gibson 
1688b4330afbSMike Kravetz 	VM_BUG_ON_PAGE(page_count(page), page);
1689b4330afbSMike Kravetz 	VM_BUG_ON_PAGE(page_mapcount(page), page);
16908ace22bcSYongkai Wu 
1691d6995da3SMike Kravetz 	hugetlb_set_page_subpool(page, NULL);
169278fbe906SDavid Hildenbrand 	if (PageAnon(page))
169378fbe906SDavid Hildenbrand 		__ClearPageAnonExclusive(page);
16948ace22bcSYongkai Wu 	page->mapping = NULL;
1695d6995da3SMike Kravetz 	restore_reserve = HPageRestoreReserve(page);
1696d6995da3SMike Kravetz 	ClearHPageRestoreReserve(page);
169727a85ef1SDavid Gibson 
16981c5ecae3SMike Kravetz 	/*
1699d6995da3SMike Kravetz 	 * If HPageRestoreReserve was set on page, page allocation consumed a
17000919e1b6SMike Kravetz 	 * reservation.  If the page was associated with a subpool, there
17010919e1b6SMike Kravetz 	 * would have been a page reserved in the subpool before allocation
17020919e1b6SMike Kravetz 	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
17036c26d310SMiaohe Lin 	 * reservation, do not call hugepage_subpool_put_pages() as this will
17040919e1b6SMike Kravetz 	 * remove the reserved page from the subpool.
17050919e1b6SMike Kravetz 	 */
17060919e1b6SMike Kravetz 	if (!restore_reserve) {
17070919e1b6SMike Kravetz 		/*
17080919e1b6SMike Kravetz 		 * A return code of zero implies that the subpool will be
17090919e1b6SMike Kravetz 		 * under its minimum size if the reservation is not restored
17100919e1b6SMike Kravetz 		 * after page is free.  Therefore, force restore_reserve
17110919e1b6SMike Kravetz 		 * operation.
17121c5ecae3SMike Kravetz 		 */
17131c5ecae3SMike Kravetz 		if (hugepage_subpool_put_pages(spool, 1) == 0)
17141c5ecae3SMike Kravetz 			restore_reserve = true;
17150919e1b6SMike Kravetz 	}
17161c5ecae3SMike Kravetz 
1717db71ef79SMike Kravetz 	spin_lock_irqsave(&hugetlb_lock, flags);
17188f251a3dSMike Kravetz 	ClearHPageMigratable(page);
17196d76dcf4SAneesh Kumar K.V 	hugetlb_cgroup_uncharge_page(hstate_index(h),
17206d76dcf4SAneesh Kumar K.V 				     pages_per_huge_page(h), page);
172108cf9fafSMina Almasry 	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
172208cf9fafSMina Almasry 					  pages_per_huge_page(h), page);
172307443a85SJoonsoo Kim 	if (restore_reserve)
172407443a85SJoonsoo Kim 		h->resv_huge_pages++;
172507443a85SJoonsoo Kim 
17269157c311SMike Kravetz 	if (HPageTemporary(page)) {
17276eb4e88aSMike Kravetz 		remove_hugetlb_page(h, page, false);
1728db71ef79SMike Kravetz 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1729b65d4adbSMuchun Song 		update_and_free_page(h, page, true);
1730ab5ac90aSMichal Hocko 	} else if (h->surplus_huge_pages_node[nid]) {
17310edaecfaSAneesh Kumar K.V 		/* remove the page from active list */
17326eb4e88aSMike Kravetz 		remove_hugetlb_page(h, page, true);
1733db71ef79SMike Kravetz 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1734b65d4adbSMuchun Song 		update_and_free_page(h, page, true);
17357893d1d5SAdam Litke 	} else {
17365d3a551cSWill Deacon 		arch_clear_hugepage_flags(page);
1737a5516438SAndi Kleen 		enqueue_huge_page(h, page);
1738db71ef79SMike Kravetz 		spin_unlock_irqrestore(&hugetlb_lock, flags);
173927a85ef1SDavid Gibson 	}
17401121828aSMike Kravetz }
174127a85ef1SDavid Gibson 
1742d3d99fccSOscar Salvador /*
1743d3d99fccSOscar Salvador  * Must be called with the hugetlb lock held
1744d3d99fccSOscar Salvador  */
1745d3d99fccSOscar Salvador static void __prep_account_new_huge_page(struct hstate *h, int nid)
1746d3d99fccSOscar Salvador {
1747d3d99fccSOscar Salvador 	lockdep_assert_held(&hugetlb_lock);
1748d3d99fccSOscar Salvador 	h->nr_huge_pages++;
1749d3d99fccSOscar Salvador 	h->nr_huge_pages_node[nid]++;
1750d3d99fccSOscar Salvador }
1751d3d99fccSOscar Salvador 
1752f41f2ed4SMuchun Song static void __prep_new_huge_page(struct hstate *h, struct page *page)
1753b7ba30c6SAndi Kleen {
17546213834cSMuchun Song 	hugetlb_vmemmap_optimize(h, page);
17550edaecfaSAneesh Kumar K.V 	INIT_LIST_HEAD(&page->lru);
1756f1e61557SKirill A. Shutemov 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1757ff546117SMike Kravetz 	hugetlb_set_page_subpool(page, NULL);
17589dd540e2SAneesh Kumar K.V 	set_hugetlb_cgroup(page, NULL);
17591adc4d41SMina Almasry 	set_hugetlb_cgroup_rsvd(page, NULL);
1760d3d99fccSOscar Salvador }
1761d3d99fccSOscar Salvador 
1762d3d99fccSOscar Salvador static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1763d3d99fccSOscar Salvador {
1764f41f2ed4SMuchun Song 	__prep_new_huge_page(h, page);
1765db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
1766d3d99fccSOscar Salvador 	__prep_account_new_huge_page(h, nid);
1767db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
1768b7ba30c6SAndi Kleen }
1769b7ba30c6SAndi Kleen 
177034d9e35bSMike Kravetz static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
177134d9e35bSMike Kravetz 								bool demote)
177220a0307cSWu Fengguang {
17737118fc29SMike Kravetz 	int i, j;
177420a0307cSWu Fengguang 	int nr_pages = 1 << order;
1775*14455eabSCheng Li 	struct page *p;
177620a0307cSWu Fengguang 
177720a0307cSWu Fengguang 	/* we rely on prep_new_huge_page to set the destructor */
177820a0307cSWu Fengguang 	set_compound_order(page, order);
1779ef5a22beSAndrea Arcangeli 	__ClearPageReserved(page);
1780de09d31dSKirill A. Shutemov 	__SetPageHead(page);
1781*14455eabSCheng Li 	for (i = 1; i < nr_pages; i++) {
1782*14455eabSCheng Li 		p = nth_page(page, i);
1783*14455eabSCheng Li 
1784ef5a22beSAndrea Arcangeli 		/*
1785ef5a22beSAndrea Arcangeli 		 * For gigantic hugepages allocated through bootmem at
1786ef5a22beSAndrea Arcangeli 		 * boot, it's safer to be consistent with the not-gigantic
1787ef5a22beSAndrea Arcangeli 		 * hugepages and clear the PG_reserved bit from all tail pages
17887c8de358SEthon Paul 		 * too.  Otherwise drivers using get_user_pages() to access tail
1789ef5a22beSAndrea Arcangeli 		 * pages may get the reference counting wrong if they see
1790ef5a22beSAndrea Arcangeli 		 * PG_reserved set on a tail page (despite the head page not
1791ef5a22beSAndrea Arcangeli 		 * having PG_reserved set).  Enforcing this consistency between
1792ef5a22beSAndrea Arcangeli 		 * head and tail pages allows drivers to optimize away a check
1793ef5a22beSAndrea Arcangeli 		 * on the head page when they need to know if put_page() is needed
1794ef5a22beSAndrea Arcangeli 		 * after get_user_pages().
1795ef5a22beSAndrea Arcangeli 		 */
1796ef5a22beSAndrea Arcangeli 		__ClearPageReserved(p);
17977118fc29SMike Kravetz 		/*
17987118fc29SMike Kravetz 		 * Subtle and very unlikely
17997118fc29SMike Kravetz 		 *
18007118fc29SMike Kravetz 		 * Gigantic 'page allocators' such as memblock or cma will
18017118fc29SMike Kravetz 		 * return a set of pages with each page ref counted.  We need
18027118fc29SMike Kravetz 		 * to turn this set of pages into a compound page with tail
18037118fc29SMike Kravetz 		 * page ref counts set to zero.  Code such as speculative page
18047118fc29SMike Kravetz 		 * cache adding could take a ref on a 'to be' tail page.
18057118fc29SMike Kravetz 		 * We need to respect any increased ref count, and only set
18067118fc29SMike Kravetz 		 * the ref count to zero if count is currently 1.  If count
1807416d85edSMike Kravetz 		 * is not 1, we return an error.  An error return indicates
1808416d85edSMike Kravetz 		 * the set of pages can not be converted to a gigantic page.
1809416d85edSMike Kravetz 		 * The caller who allocated the pages should then discard the
1810416d85edSMike Kravetz 		 * pages using the appropriate free interface.
181134d9e35bSMike Kravetz 		 *
181234d9e35bSMike Kravetz 		 * In the case of demote, the ref count will be zero.
18137118fc29SMike Kravetz 		 */
181434d9e35bSMike Kravetz 		if (!demote) {
18157118fc29SMike Kravetz 			if (!page_ref_freeze(p, 1)) {
1816416d85edSMike Kravetz 				pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
18177118fc29SMike Kravetz 				goto out_error;
18187118fc29SMike Kravetz 			}
181934d9e35bSMike Kravetz 		} else {
182034d9e35bSMike Kravetz 			VM_BUG_ON_PAGE(page_count(p), p);
182134d9e35bSMike Kravetz 		}
18221d798ca3SKirill A. Shutemov 		set_compound_head(p, page);
182320a0307cSWu Fengguang 	}
1824b4330afbSMike Kravetz 	atomic_set(compound_mapcount_ptr(page), -1);
182547e29d32SJohn Hubbard 	atomic_set(compound_pincount_ptr(page), 0);
18267118fc29SMike Kravetz 	return true;
18277118fc29SMike Kravetz 
18287118fc29SMike Kravetz out_error:
18297118fc29SMike Kravetz 	/* undo tail page modifications made above */
1830*14455eabSCheng Li 	for (j = 1; j < i; j++) {
1831*14455eabSCheng Li 		p = nth_page(page, j);
18327118fc29SMike Kravetz 		clear_compound_head(p);
18337118fc29SMike Kravetz 		set_page_refcounted(p);
18347118fc29SMike Kravetz 	}
18357118fc29SMike Kravetz 	/* need to clear PG_reserved on remaining tail pages  */
1836*14455eabSCheng Li 	for (; j < nr_pages; j++) {
1837*14455eabSCheng Li 		p = nth_page(page, j);
18387118fc29SMike Kravetz 		__ClearPageReserved(p);
1839*14455eabSCheng Li 	}
18407118fc29SMike Kravetz 	set_compound_order(page, 0);
18415232c63fSMatthew Wilcox (Oracle) #ifdef CONFIG_64BIT
18427118fc29SMike Kravetz 	page[1].compound_nr = 0;
18435232c63fSMatthew Wilcox (Oracle) #endif
18447118fc29SMike Kravetz 	__ClearPageHead(page);
18457118fc29SMike Kravetz 	return false;
184620a0307cSWu Fengguang }
184720a0307cSWu Fengguang 
184834d9e35bSMike Kravetz static bool prep_compound_gigantic_page(struct page *page, unsigned int order)
184934d9e35bSMike Kravetz {
185034d9e35bSMike Kravetz 	return __prep_compound_gigantic_page(page, order, false);
185134d9e35bSMike Kravetz }
185234d9e35bSMike Kravetz 
18538531fc6fSMike Kravetz static bool prep_compound_gigantic_page_for_demote(struct page *page,
18548531fc6fSMike Kravetz 							unsigned int order)
18558531fc6fSMike Kravetz {
18568531fc6fSMike Kravetz 	return __prep_compound_gigantic_page(page, order, true);
18578531fc6fSMike Kravetz }
18588531fc6fSMike Kravetz 
18597795912cSAndrew Morton /*
18607795912cSAndrew Morton  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
18617795912cSAndrew Morton  * transparent huge pages.  See the PageTransHuge() documentation for more
18627795912cSAndrew Morton  * details.
18637795912cSAndrew Morton  */
186420a0307cSWu Fengguang int PageHuge(struct page *page)
186520a0307cSWu Fengguang {
186620a0307cSWu Fengguang 	if (!PageCompound(page))
186720a0307cSWu Fengguang 		return 0;
186820a0307cSWu Fengguang 
186920a0307cSWu Fengguang 	page = compound_head(page);
1870f1e61557SKirill A. Shutemov 	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
187120a0307cSWu Fengguang }
187243131e14SNaoya Horiguchi EXPORT_SYMBOL_GPL(PageHuge);
187343131e14SNaoya Horiguchi 
187427c73ae7SAndrea Arcangeli /*
187527c73ae7SAndrea Arcangeli  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
187627c73ae7SAndrea Arcangeli  * normal or transparent huge pages.
187727c73ae7SAndrea Arcangeli  */
187827c73ae7SAndrea Arcangeli int PageHeadHuge(struct page *page_head)
187927c73ae7SAndrea Arcangeli {
188027c73ae7SAndrea Arcangeli 	if (!PageHead(page_head))
188127c73ae7SAndrea Arcangeli 		return 0;
188227c73ae7SAndrea Arcangeli 
1883d4af73e3SVlastimil Babka 	return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
188427c73ae7SAndrea Arcangeli }
18854e936eccSDavid Howells EXPORT_SYMBOL_GPL(PageHeadHuge);
188627c73ae7SAndrea Arcangeli 
1887c0d0381aSMike Kravetz /*
1888c0d0381aSMike Kravetz  * Find and lock address space (mapping) in write mode.
1889c0d0381aSMike Kravetz  *
1890336bf30eSMike Kravetz  * Upon entry, the page is locked which means that page_mapping() is
1891336bf30eSMike Kravetz  * stable.  Due to locking order, we can only trylock_write.  If we can
1892336bf30eSMike Kravetz  * not get the lock, simply return NULL to caller.
1893c0d0381aSMike Kravetz  */
1894c0d0381aSMike Kravetz struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
1895c0d0381aSMike Kravetz {
1896336bf30eSMike Kravetz 	struct address_space *mapping = page_mapping(hpage);
1897c0d0381aSMike Kravetz 
1898c0d0381aSMike Kravetz 	if (!mapping)
1899c0d0381aSMike Kravetz 		return mapping;
1900c0d0381aSMike Kravetz 
1901c0d0381aSMike Kravetz 	if (i_mmap_trylock_write(mapping))
1902c0d0381aSMike Kravetz 		return mapping;
1903c0d0381aSMike Kravetz 
1904c0d0381aSMike Kravetz 	return NULL;
1905c0d0381aSMike Kravetz }
1906c0d0381aSMike Kravetz 
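/*
 * Return the page-cache index of @page in units of base (PAGE_SIZE)
 * pages: the head page's index scaled to base pages plus @page's
 * offset within the huge page.
 */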
1907fe19bd3dSHugh Dickins pgoff_t hugetlb_basepage_index(struct page *page)
190813d60f4bSZhang Yi {
190913d60f4bSZhang Yi 	struct page *page_head = compound_head(page);
191013d60f4bSZhang Yi 	pgoff_t index = page_index(page_head);
191113d60f4bSZhang Yi 	unsigned long compound_idx;
191213d60f4bSZhang Yi 
191313d60f4bSZhang Yi 	if (compound_order(page_head) >= MAX_ORDER)
191413d60f4bSZhang Yi 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
191513d60f4bSZhang Yi 	else
191613d60f4bSZhang Yi 		compound_idx = page - page_head;
191713d60f4bSZhang Yi 
191813d60f4bSZhang Yi 	return (index << compound_order(page_head)) + compound_idx;
191913d60f4bSZhang Yi }
192013d60f4bSZhang Yi 
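/*
 * Allocate a fresh huge page directly from the buddy allocator.  When
 * @node_alloc_noretry is supplied, it records nodes where a previous
 * __GFP_RETRY_MAYFAIL attempt failed, so subsequent allocations on
 * those nodes do not try as hard until a later allocation on that
 * node succeeds.
 */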
19210c397daeSMichal Hocko static struct page *alloc_buddy_huge_page(struct hstate *h,
1922f60858f9SMike Kravetz 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1923f60858f9SMike Kravetz 		nodemask_t *node_alloc_noretry)
19241da177e4SLinus Torvalds {
1925af0fb9dfSMichal Hocko 	int order = huge_page_order(h);
19261da177e4SLinus Torvalds 	struct page *page;
1927f60858f9SMike Kravetz 	bool alloc_try_hard = true;
1928f96efd58SJoe Jin 
1929f60858f9SMike Kravetz 	/*
1930f60858f9SMike Kravetz 	 * By default we always try hard to allocate the page with
1931f60858f9SMike Kravetz 	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
1932f60858f9SMike Kravetz 	 * a loop (to adjust global huge page counts) and previous allocation
1933f60858f9SMike Kravetz 	 * failed, do not continue to try hard on the same node.  Use the
1934f60858f9SMike Kravetz 	 * node_alloc_noretry bitmap to manage this state information.
1935f60858f9SMike Kravetz 	 */
1936f60858f9SMike Kravetz 	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1937f60858f9SMike Kravetz 		alloc_try_hard = false;
1938f60858f9SMike Kravetz 	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1939f60858f9SMike Kravetz 	if (alloc_try_hard)
1940f60858f9SMike Kravetz 		gfp_mask |= __GFP_RETRY_MAYFAIL;
1941af0fb9dfSMichal Hocko 	if (nid == NUMA_NO_NODE)
1942af0fb9dfSMichal Hocko 		nid = numa_mem_id();
194384172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp_mask, order, nid, nmask);
1944af0fb9dfSMichal Hocko 	if (page)
1945af0fb9dfSMichal Hocko 		__count_vm_event(HTLB_BUDDY_PGALLOC);
1946af0fb9dfSMichal Hocko 	else
1947af0fb9dfSMichal Hocko 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
194863b4613cSNishanth Aravamudan 
1949f60858f9SMike Kravetz 	/*
1950f60858f9SMike Kravetz 	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page, this
1951f60858f9SMike Kravetz 	 * indicates an overall state change.  Clear the bit so that we resume
1952f60858f9SMike Kravetz 	 * normal 'try hard' allocations.
1953f60858f9SMike Kravetz 	 */
1954f60858f9SMike Kravetz 	if (node_alloc_noretry && page && !alloc_try_hard)
1955f60858f9SMike Kravetz 		node_clear(nid, *node_alloc_noretry);
1956f60858f9SMike Kravetz 
1957f60858f9SMike Kravetz 	/*
1958f60858f9SMike Kravetz 	 * If we tried hard to get a page but failed, set bit so that
1959f60858f9SMike Kravetz 	 * subsequent attempts will not try as hard until there is an
1960f60858f9SMike Kravetz 	 * overall state change.
1961f60858f9SMike Kravetz 	 */
1962f60858f9SMike Kravetz 	if (node_alloc_noretry && !page && alloc_try_hard)
1963f60858f9SMike Kravetz 		node_set(nid, *node_alloc_noretry);
1964f60858f9SMike Kravetz 
196563b4613cSNishanth Aravamudan 	return page;
196663b4613cSNishanth Aravamudan }
196763b4613cSNishanth Aravamudan 
1968af0fb9dfSMichal Hocko /*
19690c397daeSMichal Hocko  * Common helper to allocate a fresh hugetlb page. All specific allocators
19700c397daeSMichal Hocko  * should use this function to get new hugetlb pages
19710c397daeSMichal Hocko  */
19720c397daeSMichal Hocko static struct page *alloc_fresh_huge_page(struct hstate *h,
1973f60858f9SMike Kravetz 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1974f60858f9SMike Kravetz 		nodemask_t *node_alloc_noretry)
19750c397daeSMichal Hocko {
19760c397daeSMichal Hocko 	struct page *page;
19777118fc29SMike Kravetz 	bool retry = false;
19780c397daeSMichal Hocko 
19797118fc29SMike Kravetz retry:
19800c397daeSMichal Hocko 	if (hstate_is_gigantic(h))
19810c397daeSMichal Hocko 		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
19820c397daeSMichal Hocko 	else
19830c397daeSMichal Hocko 		page = alloc_buddy_huge_page(h, gfp_mask,
1984f60858f9SMike Kravetz 				nid, nmask, node_alloc_noretry);
19850c397daeSMichal Hocko 	if (!page)
19860c397daeSMichal Hocko 		return NULL;
19870c397daeSMichal Hocko 
19887118fc29SMike Kravetz 	if (hstate_is_gigantic(h)) {
19897118fc29SMike Kravetz 		if (!prep_compound_gigantic_page(page, huge_page_order(h))) {
19907118fc29SMike Kravetz 			/*
19917118fc29SMike Kravetz 			 * Rare failure to convert pages to compound page.
19927118fc29SMike Kravetz 			 * Free pages and try again - ONCE!
19937118fc29SMike Kravetz 			 */
19947118fc29SMike Kravetz 			free_gigantic_page(page, huge_page_order(h));
19957118fc29SMike Kravetz 			if (!retry) {
19967118fc29SMike Kravetz 				retry = true;
19977118fc29SMike Kravetz 				goto retry;
19987118fc29SMike Kravetz 			}
19997118fc29SMike Kravetz 			return NULL;
20007118fc29SMike Kravetz 		}
20017118fc29SMike Kravetz 	}
20020c397daeSMichal Hocko 	prep_new_huge_page(h, page, page_to_nid(page));
20030c397daeSMichal Hocko 
20040c397daeSMichal Hocko 	return page;
20050c397daeSMichal Hocko }
20060c397daeSMichal Hocko 
20070c397daeSMichal Hocko /*
2008af0fb9dfSMichal Hocko  * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
2009af0fb9dfSMichal Hocko  * manner.
2010af0fb9dfSMichal Hocko  */
2011f60858f9SMike Kravetz static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
2012f60858f9SMike Kravetz 				nodemask_t *node_alloc_noretry)
2013b2261026SJoonsoo Kim {
2014b2261026SJoonsoo Kim 	struct page *page;
2015b2261026SJoonsoo Kim 	int nr_nodes, node;
2016af0fb9dfSMichal Hocko 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2017b2261026SJoonsoo Kim 
2018b2261026SJoonsoo Kim 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2019f60858f9SMike Kravetz 		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
2020f60858f9SMike Kravetz 						node_alloc_noretry);
2021af0fb9dfSMichal Hocko 		if (page)
2022b2261026SJoonsoo Kim 			break;
2023b2261026SJoonsoo Kim 	}
2024b2261026SJoonsoo Kim 
2025af0fb9dfSMichal Hocko 	if (!page)
2026af0fb9dfSMichal Hocko 		return 0;
2027b2261026SJoonsoo Kim 
2028af0fb9dfSMichal Hocko 	put_page(page); /* free it into the hugepage allocator */
2029af0fb9dfSMichal Hocko 
2030af0fb9dfSMichal Hocko 	return 1;
2031b2261026SJoonsoo Kim }
2032b2261026SJoonsoo Kim 
2033e8c5c824SLee Schermerhorn /*
203410c6ec49SMike Kravetz  * Remove a huge page from the pool, from the next node to free.  Attempt to keep
203510c6ec49SMike Kravetz  * persistent huge pages more or less balanced over allowed nodes.
203610c6ec49SMike Kravetz  * This routine only 'removes' the hugetlb page.  The caller must make
203710c6ec49SMike Kravetz  * an additional call to free the page to low level allocators.
2038e8c5c824SLee Schermerhorn  * Called with hugetlb_lock locked.
2039e8c5c824SLee Schermerhorn  */
204010c6ec49SMike Kravetz static struct page *remove_pool_huge_page(struct hstate *h,
204110c6ec49SMike Kravetz 						nodemask_t *nodes_allowed,
20426ae11b27SLee Schermerhorn 						 bool acct_surplus)
2043e8c5c824SLee Schermerhorn {
2044b2261026SJoonsoo Kim 	int nr_nodes, node;
204510c6ec49SMike Kravetz 	struct page *page = NULL;
2046e8c5c824SLee Schermerhorn 
20479487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
2048b2261026SJoonsoo Kim 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2049685f3457SLee Schermerhorn 		/*
2050685f3457SLee Schermerhorn 		 * If we're returning unused surplus pages, only examine
2051685f3457SLee Schermerhorn 		 * nodes with surplus pages.
2052685f3457SLee Schermerhorn 		 */
2053b2261026SJoonsoo Kim 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2054b2261026SJoonsoo Kim 		    !list_empty(&h->hugepage_freelists[node])) {
205510c6ec49SMike Kravetz 			page = list_entry(h->hugepage_freelists[node].next,
2056e8c5c824SLee Schermerhorn 					  struct page, lru);
20576eb4e88aSMike Kravetz 			remove_hugetlb_page(h, page, acct_surplus);
20589a76db09SLee Schermerhorn 			break;
2059e8c5c824SLee Schermerhorn 		}
2060b2261026SJoonsoo Kim 	}
2061e8c5c824SLee Schermerhorn 
206210c6ec49SMike Kravetz 	return page;
2063e8c5c824SLee Schermerhorn }
2064e8c5c824SLee Schermerhorn 
2065c8721bbbSNaoya Horiguchi /*
2066c8721bbbSNaoya Horiguchi  * Dissolve a given free hugepage into free buddy pages. This function does
2067faf53defSNaoya Horiguchi  * nothing for in-use hugepages and non-hugepages.
2068faf53defSNaoya Horiguchi  * This function returns values like below:
2069faf53defSNaoya Horiguchi  *
2070ad2fa371SMuchun Song  *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
2071ad2fa371SMuchun Song  *           when the system is under memory pressure and the feature of
2072ad2fa371SMuchun Song  *           freeing unused vmemmap pages associated with each hugetlb page
2073ad2fa371SMuchun Song  *           is enabled.
2074faf53defSNaoya Horiguchi  *  -EBUSY:  failed to dissolve free hugepages or the hugepage is in-use
2075faf53defSNaoya Horiguchi  *           (allocated or reserved.)
2076faf53defSNaoya Horiguchi  *       0:  successfully dissolved free hugepages or the page is not a
2077faf53defSNaoya Horiguchi  *           hugepage (considered as already dissolved)
2078c8721bbbSNaoya Horiguchi  */
2079c3114a84SAnshuman Khandual int dissolve_free_huge_page(struct page *page)
2080c8721bbbSNaoya Horiguchi {
20816bc9b564SNaoya Horiguchi 	int rc = -EBUSY;
2082082d5b6bSGerald Schaefer 
20837ffddd49SMuchun Song retry:
2084faf53defSNaoya Horiguchi 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
2085faf53defSNaoya Horiguchi 	if (!PageHuge(page))
2086faf53defSNaoya Horiguchi 		return 0;
2087faf53defSNaoya Horiguchi 
2088db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
2089faf53defSNaoya Horiguchi 	if (!PageHuge(page)) {
2090faf53defSNaoya Horiguchi 		rc = 0;
2091faf53defSNaoya Horiguchi 		goto out;
2092faf53defSNaoya Horiguchi 	}
2093faf53defSNaoya Horiguchi 
2094faf53defSNaoya Horiguchi 	if (!page_count(page)) {
20952247bb33SGerald Schaefer 		struct page *head = compound_head(page);
20962247bb33SGerald Schaefer 		struct hstate *h = page_hstate(head);
20976bc9b564SNaoya Horiguchi 		if (h->free_huge_pages - h->resv_huge_pages == 0)
2098082d5b6bSGerald Schaefer 			goto out;
20997ffddd49SMuchun Song 
21007ffddd49SMuchun Song 		/*
21017ffddd49SMuchun Song 		 * We should make sure that the page is already on the free list
21027ffddd49SMuchun Song 		 * when it is dissolved.
21037ffddd49SMuchun Song 		 */
21046c037149SMike Kravetz 		if (unlikely(!HPageFreed(head))) {
2105db71ef79SMike Kravetz 			spin_unlock_irq(&hugetlb_lock);
21067ffddd49SMuchun Song 			cond_resched();
21077ffddd49SMuchun Song 
21087ffddd49SMuchun Song 			/*
21097ffddd49SMuchun Song 			 * Theoretically, we should return -EBUSY when we
21107ffddd49SMuchun Song 			 * encounter this race.  In practice, the race window
21117ffddd49SMuchun Song 			 * is quite small, so a retry has a good chance of
21127ffddd49SMuchun Song 			 * successfully dissolving the page.  Seizing this
21137ffddd49SMuchun Song 			 * opportunity is an optimization that increases the
21147ffddd49SMuchun Song 			 * success rate of dissolving pages.
21157ffddd49SMuchun Song 			 */
21167ffddd49SMuchun Song 			goto retry;
21177ffddd49SMuchun Song 		}
21187ffddd49SMuchun Song 
2119ad2fa371SMuchun Song 		remove_hugetlb_page(h, head, false);
2120ad2fa371SMuchun Song 		h->max_huge_pages--;
2121ad2fa371SMuchun Song 		spin_unlock_irq(&hugetlb_lock);
2122ad2fa371SMuchun Song 
2123c3114a84SAnshuman Khandual 		/*
2124ad2fa371SMuchun Song 		 * Normally update_and_free_page will allocate required vmemmap
2125ad2fa371SMuchun Song 		 * before freeing the page.  update_and_free_page will fail to
2126ad2fa371SMuchun Song 		 * free the page if it can not allocate required vmemmap.  We
2127ad2fa371SMuchun Song 		 * need to adjust max_huge_pages if the page is not freed.
2128ad2fa371SMuchun Song 		 * Attempt to allocate vmemmap here so that we can take
2129ad2fa371SMuchun Song 		 * appropriate action on failure.
2130ad2fa371SMuchun Song 		 */
21316213834cSMuchun Song 		rc = hugetlb_vmemmap_restore(h, head);
2132ad2fa371SMuchun Song 		if (!rc) {
2133b65d4adbSMuchun Song 			update_and_free_page(h, head, false);
2134ad2fa371SMuchun Song 		} else {
2135ad2fa371SMuchun Song 			spin_lock_irq(&hugetlb_lock);
2136ad2fa371SMuchun Song 			add_hugetlb_page(h, head, false);
2137ad2fa371SMuchun Song 			h->max_huge_pages++;
2138ad2fa371SMuchun Song 			spin_unlock_irq(&hugetlb_lock);
2139ad2fa371SMuchun Song 		}
2140ad2fa371SMuchun Song 
2141ad2fa371SMuchun Song 		return rc;
2142c8721bbbSNaoya Horiguchi 	}
2143082d5b6bSGerald Schaefer out:
2144db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
2145082d5b6bSGerald Schaefer 	return rc;
2146c8721bbbSNaoya Horiguchi }
2147c8721bbbSNaoya Horiguchi 
2148c8721bbbSNaoya Horiguchi /*
2149c8721bbbSNaoya Horiguchi  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2150c8721bbbSNaoya Horiguchi  * make specified memory blocks removable from the system.
21512247bb33SGerald Schaefer  * Note that this will dissolve a free gigantic hugepage completely, if any
21522247bb33SGerald Schaefer  * part of it lies within the given range.
2153082d5b6bSGerald Schaefer  * Also note that if dissolve_free_huge_page() returns with an error, all
2154082d5b6bSGerald Schaefer  * free hugepages that were dissolved before that error are lost.
2155c8721bbbSNaoya Horiguchi  */
2156082d5b6bSGerald Schaefer int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
2157c8721bbbSNaoya Horiguchi {
2158c8721bbbSNaoya Horiguchi 	unsigned long pfn;
2159eb03aa00SGerald Schaefer 	struct page *page;
2160082d5b6bSGerald Schaefer 	int rc = 0;
2161dc2628f3SMuchun Song 	unsigned int order;
2162dc2628f3SMuchun Song 	struct hstate *h;
2163c8721bbbSNaoya Horiguchi 
2164d0177639SLi Zhong 	if (!hugepages_supported())
2165082d5b6bSGerald Schaefer 		return rc;
2166d0177639SLi Zhong 
2167dc2628f3SMuchun Song 	order = huge_page_order(&default_hstate);
2168dc2628f3SMuchun Song 	for_each_hstate(h)
2169dc2628f3SMuchun Song 		order = min(order, huge_page_order(h));
2170dc2628f3SMuchun Song 
2171dc2628f3SMuchun Song 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2172eb03aa00SGerald Schaefer 		page = pfn_to_page(pfn);
2173eb03aa00SGerald Schaefer 		rc = dissolve_free_huge_page(page);
2174eb03aa00SGerald Schaefer 		if (rc)
2175082d5b6bSGerald Schaefer 			break;
2176eb03aa00SGerald Schaefer 	}
2177082d5b6bSGerald Schaefer 
2178082d5b6bSGerald Schaefer 	return rc;
2179c8721bbbSNaoya Horiguchi }
2180c8721bbbSNaoya Horiguchi 
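/*
 * Illustrative sketch, not part of this file: how a hotplug-style caller
 * might use dissolve_free_huge_pages() before taking a pfn range offline.
 * The function name and range parameters are assumptions for the example.
 */
static int example_prepare_range_for_offline(unsigned long start_pfn,
					     unsigned long nr_pages)
{
	/*
	 * Break any free hugepages overlapping the range back into buddy
	 * pages.  An in-use hugepage in the range makes this return -EBUSY.
	 */
	return dissolve_free_huge_pages(start_pfn, start_pfn + nr_pages);
}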
2181ab5ac90aSMichal Hocko /*
2182ab5ac90aSMichal Hocko  * Allocates a fresh surplus page from the page allocator.
2183ab5ac90aSMichal Hocko  */
21840c397daeSMichal Hocko static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
2185b65a4edaSMike Kravetz 		int nid, nodemask_t *nmask, bool zero_ref)
21867893d1d5SAdam Litke {
21879980d744SMichal Hocko 	struct page *page = NULL;
2188b65a4edaSMike Kravetz 	bool retry = false;
21897893d1d5SAdam Litke 
2190bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
2191aa888a74SAndi Kleen 		return NULL;
2192aa888a74SAndi Kleen 
2193db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
21949980d744SMichal Hocko 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
21959980d744SMichal Hocko 		goto out_unlock;
2196db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
2197d1c3fb1fSNishanth Aravamudan 
2198b65a4edaSMike Kravetz retry:
2199f60858f9SMike Kravetz 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
22009980d744SMichal Hocko 	if (!page)
22010c397daeSMichal Hocko 		return NULL;
2202d1c3fb1fSNishanth Aravamudan 
2203db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
22049980d744SMichal Hocko 	/*
22059980d744SMichal Hocko 	 * We could have raced with the pool size change.
22069980d744SMichal Hocko 	 * Double check that and simply deallocate the new page
22079980d744SMichal Hocko 	 * if we would end up overcommitting the surpluses. Abuse the
22089980d744SMichal Hocko 	 * temporary page flag to work around the nasty free_huge_page
22099980d744SMichal Hocko 	 * codeflow.
22109980d744SMichal Hocko 	 */
22119980d744SMichal Hocko 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
22129157c311SMike Kravetz 		SetHPageTemporary(page);
2213db71ef79SMike Kravetz 		spin_unlock_irq(&hugetlb_lock);
22149980d744SMichal Hocko 		put_page(page);
22152bf753e6SKai Shen 		return NULL;
2216b65a4edaSMike Kravetz 	}
2217b65a4edaSMike Kravetz 
2218b65a4edaSMike Kravetz 	if (zero_ref) {
2219b65a4edaSMike Kravetz 		/*
2220b65a4edaSMike Kravetz 		 * Caller requires a page with zero ref count.
2221b65a4edaSMike Kravetz 		 * We will drop ref count here.  If someone else is holding
2222b65a4edaSMike Kravetz 		 * a ref, the page will be freed when they drop it.  Abuse
2223b65a4edaSMike Kravetz 		 * temporary page flag to accomplish this.
2224b65a4edaSMike Kravetz 		 */
2225b65a4edaSMike Kravetz 		SetHPageTemporary(page);
2226b65a4edaSMike Kravetz 		if (!put_page_testzero(page)) {
2227b65a4edaSMike Kravetz 			/*
2228b65a4edaSMike Kravetz 			 * Unexpected inflated ref count on freshly allocated
2229b65a4edaSMike Kravetz 			 * huge page.  Retry once.
2230b65a4edaSMike Kravetz 			 */
2231b65a4edaSMike Kravetz 			pr_info("HugeTLB unexpected inflated ref count on freshly allocated page\n");
2232b65a4edaSMike Kravetz 			spin_unlock_irq(&hugetlb_lock);
2233b65a4edaSMike Kravetz 			if (retry)
2234b65a4edaSMike Kravetz 				return NULL;
2235b65a4edaSMike Kravetz 
2236b65a4edaSMike Kravetz 			retry = true;
2237b65a4edaSMike Kravetz 			goto retry;
2238b65a4edaSMike Kravetz 		}
2239b65a4edaSMike Kravetz 		ClearHPageTemporary(page);
2240b65a4edaSMike Kravetz 	}
2241b65a4edaSMike Kravetz 
22429980d744SMichal Hocko 	h->surplus_huge_pages++;
22434704dea3SMichal Hocko 	h->surplus_huge_pages_node[page_to_nid(page)]++;
22449980d744SMichal Hocko 
22459980d744SMichal Hocko out_unlock:
2246db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
22477893d1d5SAdam Litke 
22487893d1d5SAdam Litke 	return page;
22497893d1d5SAdam Litke }
22507893d1d5SAdam Litke 
2251bbe88753SJoonsoo Kim static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
2252ab5ac90aSMichal Hocko 				     int nid, nodemask_t *nmask)
2253ab5ac90aSMichal Hocko {
2254ab5ac90aSMichal Hocko 	struct page *page;
2255ab5ac90aSMichal Hocko 
2256ab5ac90aSMichal Hocko 	if (hstate_is_gigantic(h))
2257ab5ac90aSMichal Hocko 		return NULL;
2258ab5ac90aSMichal Hocko 
2259f60858f9SMike Kravetz 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
2260ab5ac90aSMichal Hocko 	if (!page)
2261ab5ac90aSMichal Hocko 		return NULL;
2262ab5ac90aSMichal Hocko 
2263ab5ac90aSMichal Hocko 	/*
2264ab5ac90aSMichal Hocko 	 * We do not account these pages as surplus because they are only
2265ab5ac90aSMichal Hocko 	 * temporary and will be released properly on the last reference
2266ab5ac90aSMichal Hocko 	 */
22679157c311SMike Kravetz 	SetHPageTemporary(page);
2268ab5ac90aSMichal Hocko 
2269ab5ac90aSMichal Hocko 	return page;
2270ab5ac90aSMichal Hocko }
2271ab5ac90aSMichal Hocko 
2272e4e574b7SAdam Litke /*
2273099730d6SDave Hansen  * Use the VMA's mpolicy to allocate a huge page from the buddy.
2274099730d6SDave Hansen  */
2275e0ec90eeSDave Hansen static
22760c397daeSMichal Hocko struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
2277099730d6SDave Hansen 		struct vm_area_struct *vma, unsigned long addr)
2278099730d6SDave Hansen {
2279cfcaa66fSBen Widawsky 	struct page *page = NULL;
2280aaf14e40SMichal Hocko 	struct mempolicy *mpol;
2281aaf14e40SMichal Hocko 	gfp_t gfp_mask = htlb_alloc_mask(h);
2282aaf14e40SMichal Hocko 	int nid;
2283aaf14e40SMichal Hocko 	nodemask_t *nodemask;
2284aaf14e40SMichal Hocko 
2285aaf14e40SMichal Hocko 	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2286cfcaa66fSBen Widawsky 	if (mpol_is_preferred_many(mpol)) {
2287cfcaa66fSBen Widawsky 		gfp_t gfp = gfp_mask | __GFP_NOWARN;
2288cfcaa66fSBen Widawsky 
2289cfcaa66fSBen Widawsky 		gfp &=  ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2290cfcaa66fSBen Widawsky 		page = alloc_surplus_huge_page(h, gfp, nid, nodemask, false);
2291cfcaa66fSBen Widawsky 
2292cfcaa66fSBen Widawsky 		/* Fallback to all nodes if page==NULL */
2293cfcaa66fSBen Widawsky 		nodemask = NULL;
2294cfcaa66fSBen Widawsky 	}
2295cfcaa66fSBen Widawsky 
2296cfcaa66fSBen Widawsky 	if (!page)
2297b65a4edaSMike Kravetz 		page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false);
2298aaf14e40SMichal Hocko 	mpol_cond_put(mpol);
2299aaf14e40SMichal Hocko 	return page;
2300099730d6SDave Hansen }
2301099730d6SDave Hansen 
2302ab5ac90aSMichal Hocko /* page migration callback function */
23033e59fcb0SMichal Hocko struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
2304d92bbc27SJoonsoo Kim 		nodemask_t *nmask, gfp_t gfp_mask)
23054db9b2efSMichal Hocko {
2306db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
23074db9b2efSMichal Hocko 	if (h->free_huge_pages - h->resv_huge_pages > 0) {
23083e59fcb0SMichal Hocko 		struct page *page;
23093e59fcb0SMichal Hocko 
23103e59fcb0SMichal Hocko 		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
23113e59fcb0SMichal Hocko 		if (page) {
2312db71ef79SMike Kravetz 			spin_unlock_irq(&hugetlb_lock);
23133e59fcb0SMichal Hocko 			return page;
23144db9b2efSMichal Hocko 		}
23154db9b2efSMichal Hocko 	}
2316db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
23174db9b2efSMichal Hocko 
23180c397daeSMichal Hocko 	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
23194db9b2efSMichal Hocko }
23204db9b2efSMichal Hocko 
2321ebd63723SMichal Hocko /* mempolicy aware migration callback */
2322389c8178SMichal Hocko struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
2323389c8178SMichal Hocko 		unsigned long address)
2324ebd63723SMichal Hocko {
2325ebd63723SMichal Hocko 	struct mempolicy *mpol;
2326ebd63723SMichal Hocko 	nodemask_t *nodemask;
2327ebd63723SMichal Hocko 	struct page *page;
2328ebd63723SMichal Hocko 	gfp_t gfp_mask;
2329ebd63723SMichal Hocko 	int node;
2330ebd63723SMichal Hocko 
2331ebd63723SMichal Hocko 	gfp_mask = htlb_alloc_mask(h);
2332ebd63723SMichal Hocko 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
2333d92bbc27SJoonsoo Kim 	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
2334ebd63723SMichal Hocko 	mpol_cond_put(mpol);
2335ebd63723SMichal Hocko 
2336ebd63723SMichal Hocko 	return page;
2337ebd63723SMichal Hocko }
2338ebd63723SMichal Hocko 
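/*
 * Illustrative sketch, not part of this file: a hypothetical migration
 * "allocate a new page" callback that uses the mempolicy aware helper
 * above for hugetlb pages.  The callback name and the way the target
 * address is obtained are assumptions for the example.
 */
static struct page *example_new_huge_page(struct page *page,
					  struct vm_area_struct *vma,
					  unsigned long address)
{
	struct hstate *h = page_hstate(compound_head(page));

	/* Honour the VMA's mempolicy when picking the destination node. */
	return alloc_huge_page_vma(h, vma, address);
}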
2339bf50bab2SNaoya Horiguchi /*
234025985edcSLucas De Marchi  * Increase the hugetlb pool such that it can accommodate a reservation
2341e4e574b7SAdam Litke  * of size 'delta'.
2342e4e574b7SAdam Litke  */
23430a4f3d1bSLiu Xiang static int gather_surplus_pages(struct hstate *h, long delta)
23441b2a1e7bSJules Irenge 	__must_hold(&hugetlb_lock)
2345e4e574b7SAdam Litke {
234634665341SMiaohe Lin 	LIST_HEAD(surplus_list);
2347e4e574b7SAdam Litke 	struct page *page, *tmp;
23480a4f3d1bSLiu Xiang 	int ret;
23490a4f3d1bSLiu Xiang 	long i;
23500a4f3d1bSLiu Xiang 	long needed, allocated;
235128073b02SHillf Danton 	bool alloc_ok = true;
2352e4e574b7SAdam Litke 
23539487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
2354a5516438SAndi Kleen 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2355ac09b3a1SAdam Litke 	if (needed <= 0) {
2356a5516438SAndi Kleen 		h->resv_huge_pages += delta;
2357e4e574b7SAdam Litke 		return 0;
2358ac09b3a1SAdam Litke 	}
2359e4e574b7SAdam Litke 
2360e4e574b7SAdam Litke 	allocated = 0;
2361e4e574b7SAdam Litke 
2362e4e574b7SAdam Litke 	ret = -ENOMEM;
2363e4e574b7SAdam Litke retry:
2364db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
2365e4e574b7SAdam Litke 	for (i = 0; i < needed; i++) {
23660c397daeSMichal Hocko 		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
2367b65a4edaSMike Kravetz 				NUMA_NO_NODE, NULL, true);
236828073b02SHillf Danton 		if (!page) {
236928073b02SHillf Danton 			alloc_ok = false;
237028073b02SHillf Danton 			break;
237128073b02SHillf Danton 		}
2372e4e574b7SAdam Litke 		list_add(&page->lru, &surplus_list);
237369ed779aSDavid Rientjes 		cond_resched();
2374e4e574b7SAdam Litke 	}
237528073b02SHillf Danton 	allocated += i;
2376e4e574b7SAdam Litke 
2377e4e574b7SAdam Litke 	/*
2378e4e574b7SAdam Litke 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
2379e4e574b7SAdam Litke 	 * because either resv_huge_pages or free_huge_pages may have changed.
2380e4e574b7SAdam Litke 	 */
2381db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
2382a5516438SAndi Kleen 	needed = (h->resv_huge_pages + delta) -
2383a5516438SAndi Kleen 			(h->free_huge_pages + allocated);
238428073b02SHillf Danton 	if (needed > 0) {
238528073b02SHillf Danton 		if (alloc_ok)
2386e4e574b7SAdam Litke 			goto retry;
238728073b02SHillf Danton 		/*
238828073b02SHillf Danton 		 * We were not able to allocate enough pages to
238928073b02SHillf Danton 		 * satisfy the entire reservation so we free what
239028073b02SHillf Danton 		 * we've allocated so far.
239128073b02SHillf Danton 		 */
239228073b02SHillf Danton 		goto free;
239328073b02SHillf Danton 	}
2394e4e574b7SAdam Litke 	/*
2395e4e574b7SAdam Litke 	 * The surplus_list now contains _at_least_ the number of extra pages
239625985edcSLucas De Marchi 	 * needed to accommodate the reservation.  Add the appropriate number
2397e4e574b7SAdam Litke 	 * of pages to the hugetlb pool and free the extras back to the buddy
2398ac09b3a1SAdam Litke 	 * allocator.  Commit the entire reservation here to prevent another
2399ac09b3a1SAdam Litke 	 * process from stealing the pages as they are added to the pool but
2400ac09b3a1SAdam Litke 	 * before they are reserved.
2401e4e574b7SAdam Litke 	 */
2402e4e574b7SAdam Litke 	needed += allocated;
2403a5516438SAndi Kleen 	h->resv_huge_pages += delta;
2404e4e574b7SAdam Litke 	ret = 0;
2405a9869b83SNaoya Horiguchi 
240619fc3f0aSAdam Litke 	/* Free the needed pages to the hugetlb pool */
240719fc3f0aSAdam Litke 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
240819fc3f0aSAdam Litke 		if ((--needed) < 0)
240919fc3f0aSAdam Litke 			break;
2410b65a4edaSMike Kravetz 		/* Add the page to the hugetlb allocator */
2411a5516438SAndi Kleen 		enqueue_huge_page(h, page);
241219fc3f0aSAdam Litke 	}
241328073b02SHillf Danton free:
2414db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
241519fc3f0aSAdam Litke 
2416b65a4edaSMike Kravetz 	/*
2417b65a4edaSMike Kravetz 	 * Free unnecessary surplus pages to the buddy allocator.
2418b65a4edaSMike Kravetz 	 * Pages have no ref count, call free_huge_page directly.
2419b65a4edaSMike Kravetz 	 */
2420c0d934baSJoonsoo Kim 	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
2421b65a4edaSMike Kravetz 		free_huge_page(page);
2422db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
2423e4e574b7SAdam Litke 
2424e4e574b7SAdam Litke 	return ret;
2425e4e574b7SAdam Litke }
2426e4e574b7SAdam Litke 
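/*
 * Worked example for gather_surplus_pages() above (illustrative numbers
 * only): with resv_huge_pages == 10, free_huge_pages == 12 and delta == 5,
 * needed == (10 + 5) - 12 == 3, so three surplus pages are requested from
 * the buddy allocator.  After retaking the lock, 'needed' is recomputed in
 * case the pool changed, and only then is the reservation of 5 committed.
 */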
2427e4e574b7SAdam Litke /*
2428e5bbc8a6SMike Kravetz  * This routine has two main purposes:
2429e5bbc8a6SMike Kravetz  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2430e5bbc8a6SMike Kravetz  *    in unused_resv_pages.  This corresponds to the prior adjustments made
2431e5bbc8a6SMike Kravetz  *    to the associated reservation map.
2432e5bbc8a6SMike Kravetz  * 2) Free any unused surplus pages that may have been allocated to satisfy
2433e5bbc8a6SMike Kravetz  *    the reservation.  As many as unused_resv_pages may be freed.
2434e4e574b7SAdam Litke  */
2435a5516438SAndi Kleen static void return_unused_surplus_pages(struct hstate *h,
2436a5516438SAndi Kleen 					unsigned long unused_resv_pages)
2437e4e574b7SAdam Litke {
2438e4e574b7SAdam Litke 	unsigned long nr_pages;
243910c6ec49SMike Kravetz 	struct page *page;
244010c6ec49SMike Kravetz 	LIST_HEAD(page_list);
244110c6ec49SMike Kravetz 
24429487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
244310c6ec49SMike Kravetz 	/* Uncommit the reservation */
244410c6ec49SMike Kravetz 	h->resv_huge_pages -= unused_resv_pages;
2445e4e574b7SAdam Litke 
2446c0531714SNaoya Horiguchi 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2447e5bbc8a6SMike Kravetz 		goto out;
2448aa888a74SAndi Kleen 
2449e5bbc8a6SMike Kravetz 	/*
2450e5bbc8a6SMike Kravetz 	 * Part (or even all) of the reservation could have been backed
2451e5bbc8a6SMike Kravetz 	 * by pre-allocated pages. Only free surplus pages.
2452e5bbc8a6SMike Kravetz 	 */
2453a5516438SAndi Kleen 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2454e4e574b7SAdam Litke 
2455685f3457SLee Schermerhorn 	/*
2456685f3457SLee Schermerhorn 	 * We want to release as many surplus pages as possible, spread
24579b5e5d0fSLee Schermerhorn 	 * evenly across all nodes with memory. Iterate across these nodes
24589b5e5d0fSLee Schermerhorn 	 * until we can no longer free unreserved surplus pages. This occurs
24599b5e5d0fSLee Schermerhorn 	 * when the nodes with surplus pages have no free pages.
246010c6ec49SMike Kravetz 	 * remove_pool_huge_page() will balance the freed pages across the
24619b5e5d0fSLee Schermerhorn 	 * on-line nodes with memory and will handle the hstate accounting.
2462685f3457SLee Schermerhorn 	 */
2463685f3457SLee Schermerhorn 	while (nr_pages--) {
246410c6ec49SMike Kravetz 		page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
246510c6ec49SMike Kravetz 		if (!page)
2466e5bbc8a6SMike Kravetz 			goto out;
246710c6ec49SMike Kravetz 
246810c6ec49SMike Kravetz 		list_add(&page->lru, &page_list);
2469e4e574b7SAdam Litke 	}
2470e5bbc8a6SMike Kravetz 
2471e5bbc8a6SMike Kravetz out:
2472db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
247310c6ec49SMike Kravetz 	update_and_free_pages_bulk(h, &page_list);
2474db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
2475e4e574b7SAdam Litke }
2476e4e574b7SAdam Litke 
24775e911373SMike Kravetz 
2478c37f9fb1SAndy Whitcroft /*
2479feba16e2SMike Kravetz  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
24805e911373SMike Kravetz  * are used by the huge page allocation routines to manage reservations.
2481cf3ad20bSMike Kravetz  *
2482cf3ad20bSMike Kravetz  * vma_needs_reservation is called to determine if the huge page at addr
2483cf3ad20bSMike Kravetz  * within the vma has an associated reservation.  If a reservation is
2484cf3ad20bSMike Kravetz  * needed, the value 1 is returned.  The caller is then responsible for
2485cf3ad20bSMike Kravetz  * managing the global reservation and subpool usage counts.  After
2486cf3ad20bSMike Kravetz  * the huge page has been allocated, vma_commit_reservation is called
2487feba16e2SMike Kravetz  * to add the page to the reservation map.  If the page allocation fails,
2488feba16e2SMike Kravetz  * the reservation must be ended instead of committed.  vma_end_reservation
2489feba16e2SMike Kravetz  * is called in such cases.
2490cf3ad20bSMike Kravetz  *
2491cf3ad20bSMike Kravetz  * In the normal case, vma_commit_reservation returns the same value
2492cf3ad20bSMike Kravetz  * as the preceding vma_needs_reservation call.  The only time this
2493cf3ad20bSMike Kravetz  * is not the case is if a reserve map was changed between calls.  It
2494cf3ad20bSMike Kravetz  * is the responsibility of the caller to notice the difference and
2495cf3ad20bSMike Kravetz  * take appropriate action.
249696b96a96SMike Kravetz  *
249796b96a96SMike Kravetz  * vma_add_reservation is used in error paths where a reservation must
249896b96a96SMike Kravetz  * be restored when a newly allocated huge page must be freed.  It is
249996b96a96SMike Kravetz  * to be called after calling vma_needs_reservation to determine if a
250096b96a96SMike Kravetz  * reservation exists.
2501846be085SMike Kravetz  *
2502846be085SMike Kravetz  * vma_del_reservation is used in error paths where an entry in the reserve
2503846be085SMike Kravetz  * map was created during huge page allocation and must be removed.  It is to
2504846be085SMike Kravetz  * be called after calling vma_needs_reservation to determine if a reservation
2505846be085SMike Kravetz  * exists.
2506c37f9fb1SAndy Whitcroft  */
25075e911373SMike Kravetz enum vma_resv_mode {
25085e911373SMike Kravetz 	VMA_NEEDS_RESV,
25095e911373SMike Kravetz 	VMA_COMMIT_RESV,
2510feba16e2SMike Kravetz 	VMA_END_RESV,
251196b96a96SMike Kravetz 	VMA_ADD_RESV,
2512846be085SMike Kravetz 	VMA_DEL_RESV,
25135e911373SMike Kravetz };
2514cf3ad20bSMike Kravetz static long __vma_reservation_common(struct hstate *h,
2515cf3ad20bSMike Kravetz 				struct vm_area_struct *vma, unsigned long addr,
25165e911373SMike Kravetz 				enum vma_resv_mode mode)
2517c37f9fb1SAndy Whitcroft {
25184e35f483SJoonsoo Kim 	struct resv_map *resv;
25194e35f483SJoonsoo Kim 	pgoff_t idx;
2520cf3ad20bSMike Kravetz 	long ret;
25210db9d74eSMina Almasry 	long dummy_out_regions_needed;
2522c37f9fb1SAndy Whitcroft 
25234e35f483SJoonsoo Kim 	resv = vma_resv_map(vma);
25244e35f483SJoonsoo Kim 	if (!resv)
2525c37f9fb1SAndy Whitcroft 		return 1;
2526c37f9fb1SAndy Whitcroft 
25274e35f483SJoonsoo Kim 	idx = vma_hugecache_offset(h, vma, addr);
25285e911373SMike Kravetz 	switch (mode) {
25295e911373SMike Kravetz 	case VMA_NEEDS_RESV:
25300db9d74eSMina Almasry 		ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
25310db9d74eSMina Almasry 		/* We assume that vma_reservation_* routines always operate on
25320db9d74eSMina Almasry 		 * 1 page, and that adding a 1 page entry to the resv map can only
25330db9d74eSMina Almasry 		 * ever require 1 region.
25340db9d74eSMina Almasry 		 */
25350db9d74eSMina Almasry 		VM_BUG_ON(dummy_out_regions_needed != 1);
25365e911373SMike Kravetz 		break;
25375e911373SMike Kravetz 	case VMA_COMMIT_RESV:
2538075a61d0SMina Almasry 		ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
25390db9d74eSMina Almasry 		/* region_add calls of range 1 should never fail. */
25400db9d74eSMina Almasry 		VM_BUG_ON(ret < 0);
25415e911373SMike Kravetz 		break;
2542feba16e2SMike Kravetz 	case VMA_END_RESV:
25430db9d74eSMina Almasry 		region_abort(resv, idx, idx + 1, 1);
25445e911373SMike Kravetz 		ret = 0;
25455e911373SMike Kravetz 		break;
254696b96a96SMike Kravetz 	case VMA_ADD_RESV:
25470db9d74eSMina Almasry 		if (vma->vm_flags & VM_MAYSHARE) {
2548075a61d0SMina Almasry 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
25490db9d74eSMina Almasry 			/* region_add calls of range 1 should never fail. */
25500db9d74eSMina Almasry 			VM_BUG_ON(ret < 0);
25510db9d74eSMina Almasry 		} else {
25520db9d74eSMina Almasry 			region_abort(resv, idx, idx + 1, 1);
255396b96a96SMike Kravetz 			ret = region_del(resv, idx, idx + 1);
255496b96a96SMike Kravetz 		}
255596b96a96SMike Kravetz 		break;
2556846be085SMike Kravetz 	case VMA_DEL_RESV:
2557846be085SMike Kravetz 		if (vma->vm_flags & VM_MAYSHARE) {
2558846be085SMike Kravetz 			region_abort(resv, idx, idx + 1, 1);
2559846be085SMike Kravetz 			ret = region_del(resv, idx, idx + 1);
2560846be085SMike Kravetz 		} else {
2561846be085SMike Kravetz 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2562846be085SMike Kravetz 			/* region_add calls of range 1 should never fail. */
2563846be085SMike Kravetz 			VM_BUG_ON(ret < 0);
2564846be085SMike Kravetz 		}
2565846be085SMike Kravetz 		break;
25665e911373SMike Kravetz 	default:
25675e911373SMike Kravetz 		BUG();
25685e911373SMike Kravetz 	}
256984afd99bSAndy Whitcroft 
2570846be085SMike Kravetz 	if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2571cf3ad20bSMike Kravetz 		return ret;
257267961f9dSMike Kravetz 	/*
2573bf3d12b9SMiaohe Lin 	 * We know a private mapping must have HPAGE_RESV_OWNER set.
2574bf3d12b9SMiaohe Lin 	 *
257567961f9dSMike Kravetz 	 * In most cases, reserves always exist for private mappings.
257667961f9dSMike Kravetz 	 * However, a file associated with the mapping could have been
257767961f9dSMike Kravetz 	 * hole punched or truncated after reserves were consumed, and
257867961f9dSMike Kravetz 	 * a subsequent fault on such a range will not use reserves.
257967961f9dSMike Kravetz 	 * Subtle - The reserve map for private mappings has the
258067961f9dSMike Kravetz 	 * opposite meaning from that of shared mappings.  If NO
258167961f9dSMike Kravetz 	 * entry is in the reserve map, it means a reservation exists.
258267961f9dSMike Kravetz 	 * If an entry exists in the reserve map, it means the
258367961f9dSMike Kravetz 	 * reservation has already been consumed.  As a result, the
258467961f9dSMike Kravetz 	 * return value of this routine is the opposite of the
258567961f9dSMike Kravetz 	 * value returned from reserve map manipulation routines above.
258667961f9dSMike Kravetz 	 */
2587bf3d12b9SMiaohe Lin 	if (ret > 0)
258867961f9dSMike Kravetz 		return 0;
2589bf3d12b9SMiaohe Lin 	if (ret == 0)
259067961f9dSMike Kravetz 		return 1;
2591bf3d12b9SMiaohe Lin 	return ret;
259284afd99bSAndy Whitcroft }
2593cf3ad20bSMike Kravetz 
2594cf3ad20bSMike Kravetz static long vma_needs_reservation(struct hstate *h,
2595a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long addr)
2596c37f9fb1SAndy Whitcroft {
25975e911373SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2598cf3ad20bSMike Kravetz }
2599c37f9fb1SAndy Whitcroft 
2600cf3ad20bSMike Kravetz static long vma_commit_reservation(struct hstate *h,
2601cf3ad20bSMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
2602cf3ad20bSMike Kravetz {
26035e911373SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
26045e911373SMike Kravetz }
26055e911373SMike Kravetz 
2606feba16e2SMike Kravetz static void vma_end_reservation(struct hstate *h,
26075e911373SMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
26085e911373SMike Kravetz {
2609feba16e2SMike Kravetz 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2610c37f9fb1SAndy Whitcroft }
2611c37f9fb1SAndy Whitcroft 
261296b96a96SMike Kravetz static long vma_add_reservation(struct hstate *h,
261396b96a96SMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
261496b96a96SMike Kravetz {
261596b96a96SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
261696b96a96SMike Kravetz }
261796b96a96SMike Kravetz 
2618846be085SMike Kravetz static long vma_del_reservation(struct hstate *h,
2619846be085SMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
262096b96a96SMike Kravetz {
2621846be085SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2622846be085SMike Kravetz }
2623846be085SMike Kravetz 
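/*
 * Illustrative sketch, not part of this file: the basic protocol the
 * allocation path follows with the helpers above.  Locking, subpool and
 * cgroup accounting are deliberately omitted; alloc_huge_page() below is
 * the real implementation of this pattern.
 */
static struct page *example_alloc_with_reservation(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	/* 0 means a reservation already exists, 1 means one is needed. */
	if (vma_needs_reservation(h, vma, addr) < 0)
		return ERR_PTR(-ENOMEM);

	page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
	if (page)
		(void)vma_commit_reservation(h, vma, addr);
	else
		vma_end_reservation(h, vma, addr);

	return page;
}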
2624846be085SMike Kravetz /*
2625846be085SMike Kravetz  * This routine is called to restore reservation information on error paths.
2626846be085SMike Kravetz  * It should ONLY be called for pages allocated via alloc_huge_page(), and
2627846be085SMike Kravetz  * the hugetlb mutex should remain held when calling this routine.
2628846be085SMike Kravetz  *
2629846be085SMike Kravetz  * It handles two specific cases:
2630846be085SMike Kravetz  * 1) A reservation was in place and the page consumed the reservation.
2631846be085SMike Kravetz  *    HPageRestoreReserve is set in the page.
2632846be085SMike Kravetz  * 2) No reservation was in place for the page, so HPageRestoreReserve is
2633846be085SMike Kravetz  *    not set.  However, alloc_huge_page always updates the reserve map.
2634846be085SMike Kravetz  *
2635846be085SMike Kravetz  * In case 1, free_huge_page later in the error path will increment the
2636846be085SMike Kravetz  * global reserve count.  But, free_huge_page does not have enough context
2637846be085SMike Kravetz  * to adjust the reservation map.  This case deals primarily with private
2638846be085SMike Kravetz  * mappings.  Adjust the reserve map here to be consistent with global
2639846be085SMike Kravetz  * reserve count adjustments to be made by free_huge_page.  Make sure the
2640846be085SMike Kravetz  * reserve map indicates there is a reservation present.
2641846be085SMike Kravetz  *
2642846be085SMike Kravetz  * In case 2, simply undo reserve map modifications done by alloc_huge_page.
2643846be085SMike Kravetz  */
2644846be085SMike Kravetz void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2645846be085SMike Kravetz 			unsigned long address, struct page *page)
2646846be085SMike Kravetz {
264796b96a96SMike Kravetz 	long rc = vma_needs_reservation(h, vma, address);
264896b96a96SMike Kravetz 
2649846be085SMike Kravetz 	if (HPageRestoreReserve(page)) {
2650846be085SMike Kravetz 		if (unlikely(rc < 0))
265196b96a96SMike Kravetz 			/*
265296b96a96SMike Kravetz 			 * Rare out of memory condition in reserve map
2653d6995da3SMike Kravetz 			 * manipulation.  Clear HPageRestoreReserve so that
265496b96a96SMike Kravetz 			 * global reserve count will not be incremented
265596b96a96SMike Kravetz 			 * by free_huge_page.  This will make it appear
265696b96a96SMike Kravetz 			 * as though the reservation for this page was
265796b96a96SMike Kravetz 			 * consumed.  This may prevent the task from
265896b96a96SMike Kravetz 			 * faulting in the page at a later time.  This
265996b96a96SMike Kravetz 			 * is better than inconsistent global huge page
266096b96a96SMike Kravetz 			 * accounting of reserve counts.
266196b96a96SMike Kravetz 			 */
2662d6995da3SMike Kravetz 			ClearHPageRestoreReserve(page);
2663846be085SMike Kravetz 		else if (rc)
2664846be085SMike Kravetz 			(void)vma_add_reservation(h, vma, address);
2665846be085SMike Kravetz 		else
2666846be085SMike Kravetz 			vma_end_reservation(h, vma, address);
2667846be085SMike Kravetz 	} else {
2668846be085SMike Kravetz 		if (!rc) {
266996b96a96SMike Kravetz 			/*
2670846be085SMike Kravetz 			 * This indicates there is an entry in the reserve map
2671c7b1850dSMike Kravetz 			 * not added by alloc_huge_page.  We know it was added
2672846be085SMike Kravetz 			 * before the alloc_huge_page call, otherwise
2673846be085SMike Kravetz 			 * HPageRestoreReserve would be set on the page.
2674846be085SMike Kravetz 			 * Remove the entry so that a subsequent allocation
2675846be085SMike Kravetz 			 * does not consume a reservation.
267696b96a96SMike Kravetz 			 */
2677846be085SMike Kravetz 			rc = vma_del_reservation(h, vma, address);
2678846be085SMike Kravetz 			if (rc < 0)
2679846be085SMike Kravetz 				/*
2680846be085SMike Kravetz 				 * VERY rare out of memory condition.  Since
2681846be085SMike Kravetz 				 * we can not delete the entry, set
2682846be085SMike Kravetz 				 * HPageRestoreReserve so that the reserve
2683846be085SMike Kravetz 				 * count will be incremented when the page
2684846be085SMike Kravetz 				 * is freed.  This reserve will be consumed
2685846be085SMike Kravetz 				 * on a subsequent allocation.
2686846be085SMike Kravetz 				 */
2687846be085SMike Kravetz 				SetHPageRestoreReserve(page);
2688846be085SMike Kravetz 		} else if (rc < 0) {
2689846be085SMike Kravetz 			/*
2690846be085SMike Kravetz 			 * Rare out of memory condition from
2691846be085SMike Kravetz 			 * vma_needs_reservation call.  Memory allocation is
2692846be085SMike Kravetz 			 * only attempted if a new entry is needed.  Therefore,
2693846be085SMike Kravetz 			 * this implies there is not an entry in the
2694846be085SMike Kravetz 			 * reserve map.
2695846be085SMike Kravetz 			 *
2696846be085SMike Kravetz 			 * For shared mappings, no entry in the map indicates
2697846be085SMike Kravetz 			 * no reservation.  We are done.
2698846be085SMike Kravetz 			 */
2699846be085SMike Kravetz 			if (!(vma->vm_flags & VM_MAYSHARE))
2700846be085SMike Kravetz 				/*
2701846be085SMike Kravetz 				 * For private mappings, no entry indicates
2702846be085SMike Kravetz 				 * a reservation is present.  Since we can
2703846be085SMike Kravetz 				 * not add an entry, set SetHPageRestoreReserve
2704846be085SMike Kravetz 				 * on the page so reserve count will be
2705846be085SMike Kravetz 				 * incremented when freed.  This reserve will
2706846be085SMike Kravetz 				 * be consumed on a subsequent allocation.
2707846be085SMike Kravetz 				 */
2708846be085SMike Kravetz 				SetHPageRestoreReserve(page);
270996b96a96SMike Kravetz 		} else
2710846be085SMike Kravetz 			/*
2711846be085SMike Kravetz 			 * No reservation present, do nothing
2712846be085SMike Kravetz 			 */
271396b96a96SMike Kravetz 			 vma_end_reservation(h, vma, address);
271496b96a96SMike Kravetz 	}
271596b96a96SMike Kravetz }
271696b96a96SMike Kravetz 
2717369fa227SOscar Salvador /*
2718369fa227SOscar Salvador  * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one
2719369fa227SOscar Salvador  * @h: struct hstate old page belongs to
2720369fa227SOscar Salvador  * @old_page: Old page to dissolve
2721ae37c7ffSOscar Salvador  * @list: List to isolate the page in case we need to
2722369fa227SOscar Salvador  * Returns 0 on success, otherwise negated error.
2723369fa227SOscar Salvador  */
2724ae37c7ffSOscar Salvador static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
2725ae37c7ffSOscar Salvador 					struct list_head *list)
2726369fa227SOscar Salvador {
2727369fa227SOscar Salvador 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2728369fa227SOscar Salvador 	int nid = page_to_nid(old_page);
2729b65a4edaSMike Kravetz 	bool alloc_retry = false;
2730369fa227SOscar Salvador 	struct page *new_page;
2731369fa227SOscar Salvador 	int ret = 0;
2732369fa227SOscar Salvador 
2733369fa227SOscar Salvador 	/*
2734369fa227SOscar Salvador 	 * Before dissolving the page, we need to allocate a new one for the
2735f41f2ed4SMuchun Song 	 * pool to remain stable.  Here, we allocate the page and 'prep' it
2736f41f2ed4SMuchun Song 	 * by doing everything but actually updating counters and adding to
2737f41f2ed4SMuchun Song 	 * the pool.  This simplifies things and lets us do most of the processing
2738f41f2ed4SMuchun Song 	 * under the lock.
2739369fa227SOscar Salvador 	 */
2740b65a4edaSMike Kravetz alloc_retry:
2741369fa227SOscar Salvador 	new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
2742369fa227SOscar Salvador 	if (!new_page)
2743369fa227SOscar Salvador 		return -ENOMEM;
2744b65a4edaSMike Kravetz 	/*
2745b65a4edaSMike Kravetz 	 * If all goes well, this page will be directly added to the free
2746b65a4edaSMike Kravetz 	 * list in the pool.  For this the ref count needs to be zero.
2747b65a4edaSMike Kravetz 	 * Attempt to drop now, and retry once if needed.  It is VERY
2748b65a4edaSMike Kravetz 	 * unlikely there is another ref on the page.
2749b65a4edaSMike Kravetz 	 *
2750b65a4edaSMike Kravetz 	 * If someone else has a reference to the page, it will be freed
2751b65a4edaSMike Kravetz 	 * when they drop their ref.  Abuse temporary page flag to accomplish
2752b65a4edaSMike Kravetz 	 * this.  Retry once if there is an inflated ref count.
2753b65a4edaSMike Kravetz 	 */
2754b65a4edaSMike Kravetz 	SetHPageTemporary(new_page);
2755b65a4edaSMike Kravetz 	if (!put_page_testzero(new_page)) {
2756b65a4edaSMike Kravetz 		if (alloc_retry)
2757b65a4edaSMike Kravetz 			return -EBUSY;
2758b65a4edaSMike Kravetz 
2759b65a4edaSMike Kravetz 		alloc_retry = true;
2760b65a4edaSMike Kravetz 		goto alloc_retry;
2761b65a4edaSMike Kravetz 	}
2762b65a4edaSMike Kravetz 	ClearHPageTemporary(new_page);
2763b65a4edaSMike Kravetz 
2764f41f2ed4SMuchun Song 	__prep_new_huge_page(h, new_page);
2765369fa227SOscar Salvador 
2766369fa227SOscar Salvador retry:
2767369fa227SOscar Salvador 	spin_lock_irq(&hugetlb_lock);
2768369fa227SOscar Salvador 	if (!PageHuge(old_page)) {
2769369fa227SOscar Salvador 		/*
2770369fa227SOscar Salvador 		 * Freed from under us. Drop new_page too.
2771369fa227SOscar Salvador 		 */
2772369fa227SOscar Salvador 		goto free_new;
2773369fa227SOscar Salvador 	} else if (page_count(old_page)) {
2774369fa227SOscar Salvador 		/*
2775ae37c7ffSOscar Salvador 		 * Someone has grabbed the page, try to isolate it here.
2776ae37c7ffSOscar Salvador 		 * Fail with -EBUSY if not possible.
2777369fa227SOscar Salvador 		 */
2778ae37c7ffSOscar Salvador 		spin_unlock_irq(&hugetlb_lock);
27797ce82f4cSMiaohe Lin 		ret = isolate_hugetlb(old_page, list);
2780ae37c7ffSOscar Salvador 		spin_lock_irq(&hugetlb_lock);
2781369fa227SOscar Salvador 		goto free_new;
2782369fa227SOscar Salvador 	} else if (!HPageFreed(old_page)) {
2783369fa227SOscar Salvador 		/*
2784369fa227SOscar Salvador 		 * Page's refcount is 0 but it has not been enqueued in the
2785369fa227SOscar Salvador 		 * freelist yet. Race window is small, so we can succeed here if
2786369fa227SOscar Salvador 		 * we retry.
2787369fa227SOscar Salvador 		 */
2788369fa227SOscar Salvador 		spin_unlock_irq(&hugetlb_lock);
2789369fa227SOscar Salvador 		cond_resched();
2790369fa227SOscar Salvador 		goto retry;
2791369fa227SOscar Salvador 	} else {
2792369fa227SOscar Salvador 		/*
2793369fa227SOscar Salvador 		 * Ok, old_page is still a genuine free hugepage. Remove it from
2794369fa227SOscar Salvador 		 * the freelist and decrease the counters. These will be
2795369fa227SOscar Salvador 		 * incremented again when calling __prep_account_new_huge_page()
2796369fa227SOscar Salvador 		 * and enqueue_huge_page() for new_page. The counters will remain
2797369fa227SOscar Salvador 		 * stable since this happens under the lock.
2798369fa227SOscar Salvador 		 */
2799369fa227SOscar Salvador 		remove_hugetlb_page(h, old_page, false);
2800369fa227SOscar Salvador 
2801369fa227SOscar Salvador 		/*
2802b65a4edaSMike Kravetz 		 * Ref count on new page is already zero as it was dropped
2803b65a4edaSMike Kravetz 		 * earlier.  It can be directly added to the pool free list.
2804369fa227SOscar Salvador 		 */
2805369fa227SOscar Salvador 		__prep_account_new_huge_page(h, nid);
2806369fa227SOscar Salvador 		enqueue_huge_page(h, new_page);
2807369fa227SOscar Salvador 
2808369fa227SOscar Salvador 		/*
2809369fa227SOscar Salvador 		 * Pages have been replaced, we can safely free the old one.
2810369fa227SOscar Salvador 		 */
2811369fa227SOscar Salvador 		spin_unlock_irq(&hugetlb_lock);
2812b65d4adbSMuchun Song 		update_and_free_page(h, old_page, false);
2813369fa227SOscar Salvador 	}
2814369fa227SOscar Salvador 
2815369fa227SOscar Salvador 	return ret;
2816369fa227SOscar Salvador 
2817369fa227SOscar Salvador free_new:
2818369fa227SOscar Salvador 	spin_unlock_irq(&hugetlb_lock);
2819b65a4edaSMike Kravetz 	/* Page has a zero ref count, but needs a ref to be freed */
2820b65a4edaSMike Kravetz 	set_page_refcounted(new_page);
2821b65d4adbSMuchun Song 	update_and_free_page(h, new_page, false);
2822369fa227SOscar Salvador 
2823369fa227SOscar Salvador 	return ret;
2824369fa227SOscar Salvador }
2825369fa227SOscar Salvador 
2826ae37c7ffSOscar Salvador int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
2827369fa227SOscar Salvador {
2828369fa227SOscar Salvador 	struct hstate *h;
2829369fa227SOscar Salvador 	struct page *head;
2830ae37c7ffSOscar Salvador 	int ret = -EBUSY;
2831369fa227SOscar Salvador 
2832369fa227SOscar Salvador 	/*
2833369fa227SOscar Salvador 	 * The page might have been dissolved from under our feet, so make sure
2834369fa227SOscar Salvador 	 * to carefully check the state under the lock.
2835369fa227SOscar Salvador 	 * Return success when racing as if we dissolved the page ourselves.
2836369fa227SOscar Salvador 	 */
2837369fa227SOscar Salvador 	spin_lock_irq(&hugetlb_lock);
2838369fa227SOscar Salvador 	if (PageHuge(page)) {
2839369fa227SOscar Salvador 		head = compound_head(page);
2840369fa227SOscar Salvador 		h = page_hstate(head);
2841369fa227SOscar Salvador 	} else {
2842369fa227SOscar Salvador 		spin_unlock_irq(&hugetlb_lock);
2843369fa227SOscar Salvador 		return 0;
2844369fa227SOscar Salvador 	}
2845369fa227SOscar Salvador 	spin_unlock_irq(&hugetlb_lock);
2846369fa227SOscar Salvador 
2847369fa227SOscar Salvador 	/*
2848369fa227SOscar Salvador 	 * Fence off gigantic pages as there is a cyclic dependency between
2849369fa227SOscar Salvador 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
2850369fa227SOscar Salvador 	 * of bailing out right away without further retrying.
2851369fa227SOscar Salvador 	 */
2852369fa227SOscar Salvador 	if (hstate_is_gigantic(h))
2853369fa227SOscar Salvador 		return -ENOMEM;
2854369fa227SOscar Salvador 
28557ce82f4cSMiaohe Lin 	if (page_count(head) && !isolate_hugetlb(head, list))
2856ae37c7ffSOscar Salvador 		ret = 0;
2857ae37c7ffSOscar Salvador 	else if (!page_count(head))
2858ae37c7ffSOscar Salvador 		ret = alloc_and_dissolve_huge_page(h, head, list);
2859ae37c7ffSOscar Salvador 
2860ae37c7ffSOscar Salvador 	return ret;
2861369fa227SOscar Salvador }
2862369fa227SOscar Salvador 
286370c3547eSMike Kravetz struct page *alloc_huge_page(struct vm_area_struct *vma,
286404f2cbe3SMel Gorman 				    unsigned long addr, int avoid_reserve)
2865348ea204SAdam Litke {
286690481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_vma(vma);
2867a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
2868348ea204SAdam Litke 	struct page *page;
2869d85f69b0SMike Kravetz 	long map_chg, map_commit;
2870d85f69b0SMike Kravetz 	long gbl_chg;
28716d76dcf4SAneesh Kumar K.V 	int ret, idx;
28726d76dcf4SAneesh Kumar K.V 	struct hugetlb_cgroup *h_cg;
287308cf9fafSMina Almasry 	bool deferred_reserve;
28742fc39cecSAdam Litke 
28756d76dcf4SAneesh Kumar K.V 	idx = hstate_index(h);
2876a1e78772SMel Gorman 	/*
2877d85f69b0SMike Kravetz 	 * Examine the region/reserve map to determine if the process
2878d85f69b0SMike Kravetz 	 * has a reservation for the page to be allocated.  A return
2879d85f69b0SMike Kravetz 	 * code of zero indicates a reservation exists (no change).
2880a1e78772SMel Gorman 	 */
2881d85f69b0SMike Kravetz 	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2882d85f69b0SMike Kravetz 	if (map_chg < 0)
288376dcee75SAneesh Kumar K.V 		return ERR_PTR(-ENOMEM);
2884d85f69b0SMike Kravetz 
2885d85f69b0SMike Kravetz 	/*
2886d85f69b0SMike Kravetz 	 * Processes that did not create the mapping will have no
2887d85f69b0SMike Kravetz 	 * reserves as indicated by the region/reserve map. Check
2888d85f69b0SMike Kravetz 	 * that the allocation will not exceed the subpool limit.
2889d85f69b0SMike Kravetz 	 * Allocations for MAP_NORESERVE mappings also need to be
2890d85f69b0SMike Kravetz 	 * checked against any subpool limit.
2891d85f69b0SMike Kravetz 	 */
2892d85f69b0SMike Kravetz 	if (map_chg || avoid_reserve) {
2893d85f69b0SMike Kravetz 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
2894d85f69b0SMike Kravetz 		if (gbl_chg < 0) {
2895feba16e2SMike Kravetz 			vma_end_reservation(h, vma, addr);
289676dcee75SAneesh Kumar K.V 			return ERR_PTR(-ENOSPC);
28975e911373SMike Kravetz 		}
289890d8b7e6SAdam Litke 
2899d85f69b0SMike Kravetz 		/*
2900d85f69b0SMike Kravetz 		 * Even though there was no reservation in the region/reserve
2901d85f69b0SMike Kravetz 		 * map, there could be reservations associated with the
2902d85f69b0SMike Kravetz 		 * subpool that can be used.  This would be indicated if the
2903d85f69b0SMike Kravetz 		 * return value of hugepage_subpool_get_pages() is zero.
2904d85f69b0SMike Kravetz 		 * However, if avoid_reserve is specified we still avoid even
2905d85f69b0SMike Kravetz 		 * the subpool reservations.
2906d85f69b0SMike Kravetz 		 */
2907d85f69b0SMike Kravetz 		if (avoid_reserve)
2908d85f69b0SMike Kravetz 			gbl_chg = 1;
2909d85f69b0SMike Kravetz 	}
2910d85f69b0SMike Kravetz 
291108cf9fafSMina Almasry 	/* If this allocation is not consuming a reservation, charge it now.
291208cf9fafSMina Almasry 	 */
29136501fe5fSMiaohe Lin 	deferred_reserve = map_chg || avoid_reserve;
291408cf9fafSMina Almasry 	if (deferred_reserve) {
291508cf9fafSMina Almasry 		ret = hugetlb_cgroup_charge_cgroup_rsvd(
291608cf9fafSMina Almasry 			idx, pages_per_huge_page(h), &h_cg);
29178f34af6fSJianyu Zhan 		if (ret)
29188f34af6fSJianyu Zhan 			goto out_subpool_put;
291908cf9fafSMina Almasry 	}
292008cf9fafSMina Almasry 
292108cf9fafSMina Almasry 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
292208cf9fafSMina Almasry 	if (ret)
292308cf9fafSMina Almasry 		goto out_uncharge_cgroup_reservation;
29248f34af6fSJianyu Zhan 
2925db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
2926d85f69b0SMike Kravetz 	/*
2927d85f69b0SMike Kravetz 	 * gbl_chg is passed to indicate whether or not a page must be taken
2928d85f69b0SMike Kravetz 	 * from the global free pool (global change).  gbl_chg == 0 indicates
2929d85f69b0SMike Kravetz 	 * a reservation exists for the allocation.
2930d85f69b0SMike Kravetz 	 */
2931d85f69b0SMike Kravetz 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
293281a6fcaeSJoonsoo Kim 	if (!page) {
2933db71ef79SMike Kravetz 		spin_unlock_irq(&hugetlb_lock);
29340c397daeSMichal Hocko 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
29358f34af6fSJianyu Zhan 		if (!page)
29368f34af6fSJianyu Zhan 			goto out_uncharge_cgroup;
2937a88c7695SNaoya Horiguchi 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2938d6995da3SMike Kravetz 			SetHPageRestoreReserve(page);
2939a88c7695SNaoya Horiguchi 			h->resv_huge_pages--;
2940a88c7695SNaoya Horiguchi 		}
2941db71ef79SMike Kravetz 		spin_lock_irq(&hugetlb_lock);
294215a8d68eSWei Yang 		list_add(&page->lru, &h->hugepage_activelist);
294381a6fcaeSJoonsoo Kim 		/* Fall through */
2944a1e78772SMel Gorman 	}
294581a6fcaeSJoonsoo Kim 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
294608cf9fafSMina Almasry 	/* If allocation is not consuming a reservation, also store the
294708cf9fafSMina Almasry 	 * hugetlb_cgroup pointer on the page.
294808cf9fafSMina Almasry 	 */
294908cf9fafSMina Almasry 	if (deferred_reserve) {
295008cf9fafSMina Almasry 		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
295108cf9fafSMina Almasry 						  h_cg, page);
295208cf9fafSMina Almasry 	}
295308cf9fafSMina Almasry 
2954db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
2955a1e78772SMel Gorman 
2956d6995da3SMike Kravetz 	hugetlb_set_page_subpool(page, spool);
2957a1e78772SMel Gorman 
2958d85f69b0SMike Kravetz 	map_commit = vma_commit_reservation(h, vma, addr);
2959d85f69b0SMike Kravetz 	if (unlikely(map_chg > map_commit)) {
296033039678SMike Kravetz 		/*
296133039678SMike Kravetz 		 * The page was added to the reservation map between
296233039678SMike Kravetz 		 * vma_needs_reservation and vma_commit_reservation.
296333039678SMike Kravetz 		 * This indicates a race with hugetlb_reserve_pages.
296433039678SMike Kravetz 		 * Adjust for the subpool count incremented above AND
296533039678SMike Kravetz 		 * in hugetlb_reserve_pages for the same page.  Also,
296633039678SMike Kravetz 		 * the reservation count added in hugetlb_reserve_pages
296733039678SMike Kravetz 		 * no longer applies.
296833039678SMike Kravetz 		 */
296933039678SMike Kravetz 		long rsv_adjust;
297033039678SMike Kravetz 
297133039678SMike Kravetz 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
297233039678SMike Kravetz 		hugetlb_acct_memory(h, -rsv_adjust);
297379aa925bSMike Kravetz 		if (deferred_reserve)
297479aa925bSMike Kravetz 			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
297579aa925bSMike Kravetz 					pages_per_huge_page(h), page);
297633039678SMike Kravetz 	}
29777893d1d5SAdam Litke 	return page;
29788f34af6fSJianyu Zhan 
29798f34af6fSJianyu Zhan out_uncharge_cgroup:
29808f34af6fSJianyu Zhan 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
298108cf9fafSMina Almasry out_uncharge_cgroup_reservation:
298208cf9fafSMina Almasry 	if (deferred_reserve)
298308cf9fafSMina Almasry 		hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
298408cf9fafSMina Almasry 						    h_cg);
29858f34af6fSJianyu Zhan out_subpool_put:
2986d85f69b0SMike Kravetz 	if (map_chg || avoid_reserve)
29878f34af6fSJianyu Zhan 		hugepage_subpool_put_pages(spool, 1);
2988feba16e2SMike Kravetz 	vma_end_reservation(h, vma, addr);
29898f34af6fSJianyu Zhan 	return ERR_PTR(-ENOSPC);
2990b45b5bd6SDavid Gibson }
2991b45b5bd6SDavid Gibson 
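/*
 * Illustrative summary, not part of this file, of the charging decisions
 * made in alloc_huge_page() above:
 *   - map_chg == 0 and !avoid_reserve: a reservation exists, so neither
 *     the subpool nor (via gbl_chg == 0) the global free count needs to
 *     be debited for this page.
 *   - map_chg != 0: no reservation; the subpool is charged, and the
 *     hugepage_subpool_get_pages() return value (gbl_chg) says whether a
 *     page must also come from the global free pool.
 *   - avoid_reserve: gbl_chg is forced to 1 so that even existing subpool
 *     reservations are not consumed.
 */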
2992b5389086SZhenguo Yao int alloc_bootmem_huge_page(struct hstate *h, int nid)
2993e24a1307SAneesh Kumar K.V 	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2994b5389086SZhenguo Yao int __alloc_bootmem_huge_page(struct hstate *h, int nid)
2995aa888a74SAndi Kleen {
2996b5389086SZhenguo Yao 	struct huge_bootmem_page *m = NULL; /* initialize for clang */
2997b2261026SJoonsoo Kim 	int nr_nodes, node;
2998aa888a74SAndi Kleen 
2999b5389086SZhenguo Yao 	/* do node specific alloc */
3000b5389086SZhenguo Yao 	if (nid != NUMA_NO_NODE) {
3001b5389086SZhenguo Yao 		m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
3002b5389086SZhenguo Yao 				0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3003b5389086SZhenguo Yao 		if (!m)
3004b5389086SZhenguo Yao 			return 0;
3005b5389086SZhenguo Yao 		goto found;
3006b5389086SZhenguo Yao 	}
3007b5389086SZhenguo Yao 	/* allocate from next node when distributing huge pages */
3008b2261026SJoonsoo Kim 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
3009b5389086SZhenguo Yao 		m = memblock_alloc_try_nid_raw(
30108b89a116SGrygorii Strashko 				huge_page_size(h), huge_page_size(h),
301197ad1087SMike Rapoport 				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
3012aa888a74SAndi Kleen 		/*
3013aa888a74SAndi Kleen 		 * Use the beginning of the huge page to store the
3014aa888a74SAndi Kleen 		 * huge_bootmem_page struct (until gather_bootmem
3015aa888a74SAndi Kleen 		 * puts them into the mem_map).
3016aa888a74SAndi Kleen 		 */
3017b5389086SZhenguo Yao 		if (!m)
3018b5389086SZhenguo Yao 			return 0;
3019aa888a74SAndi Kleen 		goto found;
3020aa888a74SAndi Kleen 	}
3021aa888a74SAndi Kleen 
3022aa888a74SAndi Kleen found:
3023aa888a74SAndi Kleen 	/* Put them into a private list first because mem_map is not up yet */
3024330d6e48SCannon Matthews 	INIT_LIST_HEAD(&m->list);
3025aa888a74SAndi Kleen 	list_add(&m->list, &huge_boot_pages);
3026aa888a74SAndi Kleen 	m->hstate = h;
3027aa888a74SAndi Kleen 	return 1;
3028aa888a74SAndi Kleen }
3029aa888a74SAndi Kleen 
303048b8d744SMike Kravetz /*
303148b8d744SMike Kravetz  * Put bootmem huge pages into the standard lists after mem_map is up.
303248b8d744SMike Kravetz  * Note: This only applies to gigantic (order > MAX_ORDER) pages.
303348b8d744SMike Kravetz  */
3034aa888a74SAndi Kleen static void __init gather_bootmem_prealloc(void)
3035aa888a74SAndi Kleen {
3036aa888a74SAndi Kleen 	struct huge_bootmem_page *m;
3037aa888a74SAndi Kleen 
3038aa888a74SAndi Kleen 	list_for_each_entry(m, &huge_boot_pages, list) {
303940d18ebfSMike Kravetz 		struct page *page = virt_to_page(m);
3040aa888a74SAndi Kleen 		struct hstate *h = m->hstate;
3041ee8f248dSBecky Bruce 
304248b8d744SMike Kravetz 		VM_BUG_ON(!hstate_is_gigantic(h));
3043aa888a74SAndi Kleen 		WARN_ON(page_count(page) != 1);
30447118fc29SMike Kravetz 		if (prep_compound_gigantic_page(page, huge_page_order(h))) {
3045ef5a22beSAndrea Arcangeli 			WARN_ON(PageReserved(page));
3046aa888a74SAndi Kleen 			prep_new_huge_page(h, page, page_to_nid(page));
30477118fc29SMike Kravetz 			put_page(page); /* add to the hugepage allocator */
30487118fc29SMike Kravetz 		} else {
3049416d85edSMike Kravetz 			/* VERY unlikely inflated ref count on a tail page */
30507118fc29SMike Kravetz 			free_gigantic_page(page, huge_page_order(h));
30517118fc29SMike Kravetz 		}
3052af0fb9dfSMichal Hocko 
3053b0320c7bSRafael Aquini 		/*
305448b8d744SMike Kravetz 		 * We need to restore the 'stolen' pages to totalram_pages
305548b8d744SMike Kravetz 		 * in order to fix confusing memory reports from free(1) and
305648b8d744SMike Kravetz 		 * other side-effects, like CommitLimit going negative.
3057b0320c7bSRafael Aquini 		 */
3058c78a7f36SMiaohe Lin 		adjust_managed_page_count(page, pages_per_huge_page(h));
3059520495feSCannon Matthews 		cond_resched();
3060aa888a74SAndi Kleen 	}
3061aa888a74SAndi Kleen }
3062b5389086SZhenguo Yao static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3063b5389086SZhenguo Yao {
3064b5389086SZhenguo Yao 	unsigned long i;
3065b5389086SZhenguo Yao 	char buf[32];
3066b5389086SZhenguo Yao 
3067b5389086SZhenguo Yao 	for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3068b5389086SZhenguo Yao 		if (hstate_is_gigantic(h)) {
3069b5389086SZhenguo Yao 			if (!alloc_bootmem_huge_page(h, nid))
3070b5389086SZhenguo Yao 				break;
3071b5389086SZhenguo Yao 		} else {
3072b5389086SZhenguo Yao 			struct page *page;
3073b5389086SZhenguo Yao 			gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3074b5389086SZhenguo Yao 
3075b5389086SZhenguo Yao 			page = alloc_fresh_huge_page(h, gfp_mask, nid,
3076b5389086SZhenguo Yao 					&node_states[N_MEMORY], NULL);
3077b5389086SZhenguo Yao 			if (!page)
3078b5389086SZhenguo Yao 				break;
3079b5389086SZhenguo Yao 			put_page(page); /* free it into the hugepage allocator */
3080b5389086SZhenguo Yao 		}
3081b5389086SZhenguo Yao 		cond_resched();
3082b5389086SZhenguo Yao 	}
3083b5389086SZhenguo Yao 	if (i == h->max_huge_pages_node[nid])
3084b5389086SZhenguo Yao 		return;
3085b5389086SZhenguo Yao 
3086b5389086SZhenguo Yao 	string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3087b5389086SZhenguo Yao 	pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
3088b5389086SZhenguo Yao 		h->max_huge_pages_node[nid], buf, nid, i);
3089b5389086SZhenguo Yao 	h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3090b5389086SZhenguo Yao 	h->max_huge_pages_node[nid] = i;
3091b5389086SZhenguo Yao }
3092aa888a74SAndi Kleen 
30938faa8b07SAndi Kleen static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
30941da177e4SLinus Torvalds {
30951da177e4SLinus Torvalds 	unsigned long i;
3096f60858f9SMike Kravetz 	nodemask_t *node_alloc_noretry;
3097b5389086SZhenguo Yao 	bool node_specific_alloc = false;
3098f60858f9SMike Kravetz 
3099b5389086SZhenguo Yao 	/* skip gigantic hugepages allocation if hugetlb_cma enabled */
3100b5389086SZhenguo Yao 	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
3101b5389086SZhenguo Yao 		pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3102b5389086SZhenguo Yao 		return;
3103b5389086SZhenguo Yao 	}
3104b5389086SZhenguo Yao 
3105b5389086SZhenguo Yao 	/* do node specific alloc */
31060a7a0f6fSPeng Liu 	for_each_online_node(i) {
3107b5389086SZhenguo Yao 		if (h->max_huge_pages_node[i] > 0) {
3108b5389086SZhenguo Yao 			hugetlb_hstate_alloc_pages_onenode(h, i);
3109b5389086SZhenguo Yao 			node_specific_alloc = true;
3110b5389086SZhenguo Yao 		}
3111b5389086SZhenguo Yao 	}
3112b5389086SZhenguo Yao 
3113b5389086SZhenguo Yao 	if (node_specific_alloc)
3114b5389086SZhenguo Yao 		return;
3115b5389086SZhenguo Yao 
3116b5389086SZhenguo Yao 	/* below will do all node balanced alloc */
3117f60858f9SMike Kravetz 	if (!hstate_is_gigantic(h)) {
3118f60858f9SMike Kravetz 		/*
3119f60858f9SMike Kravetz 		 * Bit mask controlling how hard we retry per-node allocations.
3120f60858f9SMike Kravetz 		 * Ignore errors as lower level routines can deal with
3121f60858f9SMike Kravetz 		 * node_alloc_noretry == NULL.  If this kmalloc fails at boot
3122f60858f9SMike Kravetz 		 * time, we are likely in bigger trouble.
3123f60858f9SMike Kravetz 		 */
3124f60858f9SMike Kravetz 		node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
3125f60858f9SMike Kravetz 						GFP_KERNEL);
3126f60858f9SMike Kravetz 	} else {
3127f60858f9SMike Kravetz 		/* allocations done at boot time */
3128f60858f9SMike Kravetz 		node_alloc_noretry = NULL;
3129f60858f9SMike Kravetz 	}
3130f60858f9SMike Kravetz 
3131f60858f9SMike Kravetz 	/* bit mask controlling how hard we retry per-node allocations */
3132f60858f9SMike Kravetz 	if (node_alloc_noretry)
3133f60858f9SMike Kravetz 		nodes_clear(*node_alloc_noretry);
31341da177e4SLinus Torvalds 
3135e5ff2159SAndi Kleen 	for (i = 0; i < h->max_huge_pages; ++i) {
3136bae7f4aeSLuiz Capitulino 		if (hstate_is_gigantic(h)) {
3137b5389086SZhenguo Yao 			if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3138aa888a74SAndi Kleen 				break;
31390c397daeSMichal Hocko 		} else if (!alloc_pool_huge_page(h,
3140f60858f9SMike Kravetz 					 &node_states[N_MEMORY],
3141f60858f9SMike Kravetz 					 node_alloc_noretry))
31421da177e4SLinus Torvalds 			break;
314369ed779aSDavid Rientjes 		cond_resched();
31441da177e4SLinus Torvalds 	}
3145d715cf80SLiam R. Howlett 	if (i < h->max_huge_pages) {
3146d715cf80SLiam R. Howlett 		char buf[32];
3147d715cf80SLiam R. Howlett 
3148c6247f72SMatthew Wilcox 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3149d715cf80SLiam R. Howlett 		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
3150d715cf80SLiam R. Howlett 			h->max_huge_pages, buf, i);
31518faa8b07SAndi Kleen 		h->max_huge_pages = i;
3152e5ff2159SAndi Kleen 	}
3153f60858f9SMike Kravetz 	kfree(node_alloc_noretry);
3154d715cf80SLiam R. Howlett }
3155e5ff2159SAndi Kleen 
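/*
 * For each hstate, allocate any boot time pages not already set up and
 * compute the demote order: the largest supported huge page order smaller
 * than this hstate that its pages may be demoted to.
 */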
3156e5ff2159SAndi Kleen static void __init hugetlb_init_hstates(void)
3157e5ff2159SAndi Kleen {
315879dfc695SMike Kravetz 	struct hstate *h, *h2;
3159e5ff2159SAndi Kleen 
3160e5ff2159SAndi Kleen 	for_each_hstate(h) {
31618faa8b07SAndi Kleen 		/* oversize hugepages were init'ed in early boot */
3162bae7f4aeSLuiz Capitulino 		if (!hstate_is_gigantic(h))
31638faa8b07SAndi Kleen 			hugetlb_hstate_alloc_pages(h);
316479dfc695SMike Kravetz 
316579dfc695SMike Kravetz 		/*
316679dfc695SMike Kravetz 		 * Set demote order for each hstate.  Note that
316779dfc695SMike Kravetz 		 * h->demote_order is initially 0.
316879dfc695SMike Kravetz 		 * - We can not demote gigantic pages if runtime freeing
316979dfc695SMike Kravetz 		 *   is not supported, so skip this.
3170a01f4390SMike Kravetz 		 * - If CMA allocation is possible, we can not demote
3171a01f4390SMike Kravetz 		 *   HUGETLB_PAGE_ORDER or smaller size pages.
317279dfc695SMike Kravetz 		 */
317379dfc695SMike Kravetz 		if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
317479dfc695SMike Kravetz 			continue;
3175a01f4390SMike Kravetz 		if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
3176a01f4390SMike Kravetz 			continue;
317779dfc695SMike Kravetz 		for_each_hstate(h2) {
317879dfc695SMike Kravetz 			if (h2 == h)
317979dfc695SMike Kravetz 				continue;
318079dfc695SMike Kravetz 			if (h2->order < h->order &&
318179dfc695SMike Kravetz 			    h2->order > h->demote_order)
318279dfc695SMike Kravetz 				h->demote_order = h2->order;
318379dfc695SMike Kravetz 		}
3184e5ff2159SAndi Kleen 	}
3185e5ff2159SAndi Kleen }
3186e5ff2159SAndi Kleen 
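/* Log the pre-allocated page count and freeable vmemmap size per hstate. */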
3187e5ff2159SAndi Kleen static void __init report_hugepages(void)
3188e5ff2159SAndi Kleen {
3189e5ff2159SAndi Kleen 	struct hstate *h;
3190e5ff2159SAndi Kleen 
3191e5ff2159SAndi Kleen 	for_each_hstate(h) {
31924abd32dbSAndi Kleen 		char buf[32];
3193c6247f72SMatthew Wilcox 
3194c6247f72SMatthew Wilcox 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
31956213834cSMuchun Song 		pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
3196c6247f72SMatthew Wilcox 			buf, h->free_huge_pages);
31976213834cSMuchun Song 		pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
31986213834cSMuchun Song 			hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
3199e5ff2159SAndi Kleen 	}
3200e5ff2159SAndi Kleen }
3201e5ff2159SAndi Kleen 
32021da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM
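/*
 * When shrinking the pool on a CONFIG_HIGHMEM system, prefer to free huge
 * pages that reside in low memory (skipping highmem pages), since low
 * memory is the scarcer resource.  Called with hugetlb_lock held.
 */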
32036ae11b27SLee Schermerhorn static void try_to_free_low(struct hstate *h, unsigned long count,
32046ae11b27SLee Schermerhorn 						nodemask_t *nodes_allowed)
32051da177e4SLinus Torvalds {
32064415cc8dSChristoph Lameter 	int i;
32071121828aSMike Kravetz 	LIST_HEAD(page_list);
32084415cc8dSChristoph Lameter 
32099487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
3210bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
3211aa888a74SAndi Kleen 		return;
3212aa888a74SAndi Kleen 
32131121828aSMike Kravetz 	/*
32141121828aSMike Kravetz 	 * Collect pages to be freed on a list, and free after dropping lock
32151121828aSMike Kravetz 	 */
32166ae11b27SLee Schermerhorn 	for_each_node_mask(i, *nodes_allowed) {
321710c6ec49SMike Kravetz 		struct page *page, *next;
3218a5516438SAndi Kleen 		struct list_head *freel = &h->hugepage_freelists[i];
3219a5516438SAndi Kleen 		list_for_each_entry_safe(page, next, freel, lru) {
3220a5516438SAndi Kleen 			if (count >= h->nr_huge_pages)
32211121828aSMike Kravetz 				goto out;
32221da177e4SLinus Torvalds 			if (PageHighMem(page))
32231da177e4SLinus Torvalds 				continue;
32246eb4e88aSMike Kravetz 			remove_hugetlb_page(h, page, false);
32251121828aSMike Kravetz 			list_add(&page->lru, &page_list);
32261121828aSMike Kravetz 		}
32271121828aSMike Kravetz 	}
32281121828aSMike Kravetz 
32291121828aSMike Kravetz out:
3230db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
323110c6ec49SMike Kravetz 	update_and_free_pages_bulk(h, &page_list);
3232db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
32331da177e4SLinus Torvalds }
32341da177e4SLinus Torvalds #else
32356ae11b27SLee Schermerhorn static inline void try_to_free_low(struct hstate *h, unsigned long count,
32366ae11b27SLee Schermerhorn 						nodemask_t *nodes_allowed)
32371da177e4SLinus Torvalds {
32381da177e4SLinus Torvalds }
32391da177e4SLinus Torvalds #endif
32401da177e4SLinus Torvalds 
324120a0307cSWu Fengguang /*
324220a0307cSWu Fengguang  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
324320a0307cSWu Fengguang  * balanced by operating on them in a round-robin fashion.
324420a0307cSWu Fengguang  * Returns 1 if an adjustment was made.
324520a0307cSWu Fengguang  */
32466ae11b27SLee Schermerhorn static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
32476ae11b27SLee Schermerhorn 				int delta)
324820a0307cSWu Fengguang {
3249b2261026SJoonsoo Kim 	int nr_nodes, node;
325020a0307cSWu Fengguang 
32519487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
325220a0307cSWu Fengguang 	VM_BUG_ON(delta != -1 && delta != 1);
325320a0307cSWu Fengguang 
3254e8c5c824SLee Schermerhorn 	if (delta < 0) {
3255b2261026SJoonsoo Kim 		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
3256b2261026SJoonsoo Kim 			if (h->surplus_huge_pages_node[node])
3257b2261026SJoonsoo Kim 				goto found;
3258b2261026SJoonsoo Kim 		}
3259b2261026SJoonsoo Kim 	} else {
3260b2261026SJoonsoo Kim 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3261b2261026SJoonsoo Kim 			if (h->surplus_huge_pages_node[node] <
3262b2261026SJoonsoo Kim 					h->nr_huge_pages_node[node])
3263b2261026SJoonsoo Kim 				goto found;
3264e8c5c824SLee Schermerhorn 		}
32659a76db09SLee Schermerhorn 	}
3266b2261026SJoonsoo Kim 	return 0;
326720a0307cSWu Fengguang 
3268b2261026SJoonsoo Kim found:
326920a0307cSWu Fengguang 	h->surplus_huge_pages += delta;
3270b2261026SJoonsoo Kim 	h->surplus_huge_pages_node[node] += delta;
3271b2261026SJoonsoo Kim 	return 1;
327220a0307cSWu Fengguang }
327320a0307cSWu Fengguang 
3274a5516438SAndi Kleen #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
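/*
 * Resize the persistent huge page pool of 'h' to 'count' pages, growing or
 * shrinking as needed.  A node specific request (nid != NUMA_NO_NODE) only
 * adjusts that node's count; nodes_allowed restricts where pages may be
 * allocated or freed.  Serialized by h->resize_lock.
 */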
3275fd875dcaSMike Kravetz static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
32766ae11b27SLee Schermerhorn 			      nodemask_t *nodes_allowed)
32771da177e4SLinus Torvalds {
32787893d1d5SAdam Litke 	unsigned long min_count, ret;
327910c6ec49SMike Kravetz 	struct page *page;
328010c6ec49SMike Kravetz 	LIST_HEAD(page_list);
3281f60858f9SMike Kravetz 	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3282f60858f9SMike Kravetz 
3283f60858f9SMike Kravetz 	/*
3284f60858f9SMike Kravetz 	 * Bit mask controlling how hard we retry per-node allocations.
3285f60858f9SMike Kravetz 	 * If we can not allocate the bit mask, do not attempt to allocate
3286f60858f9SMike Kravetz 	 * the requested huge pages.
3287f60858f9SMike Kravetz 	 */
3288f60858f9SMike Kravetz 	if (node_alloc_noretry)
3289f60858f9SMike Kravetz 		nodes_clear(*node_alloc_noretry);
3290f60858f9SMike Kravetz 	else
3291f60858f9SMike Kravetz 		return -ENOMEM;
32921da177e4SLinus Torvalds 
329329383967SMike Kravetz 	/*
329429383967SMike Kravetz 	 * resize_lock mutex prevents concurrent adjustments to number of
329529383967SMike Kravetz 	 * pages in hstate via the proc/sysfs interfaces.
329629383967SMike Kravetz 	 */
329729383967SMike Kravetz 	mutex_lock(&h->resize_lock);
3298b65d4adbSMuchun Song 	flush_free_hpage_work(h);
3299db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
33004eb0716eSAlexandre Ghiti 
33014eb0716eSAlexandre Ghiti 	/*
3302fd875dcaSMike Kravetz 	 * Check for a node specific request.
3303fd875dcaSMike Kravetz 	 * Changing node specific huge page count may require a corresponding
3304fd875dcaSMike Kravetz 	 * change to the global count.  In any case, the passed node mask
3305fd875dcaSMike Kravetz 	 * (nodes_allowed) will restrict alloc/free to the specified node.
3306fd875dcaSMike Kravetz 	 */
3307fd875dcaSMike Kravetz 	if (nid != NUMA_NO_NODE) {
3308fd875dcaSMike Kravetz 		unsigned long old_count = count;
3309fd875dcaSMike Kravetz 
3310fd875dcaSMike Kravetz 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
3311fd875dcaSMike Kravetz 		/*
3312fd875dcaSMike Kravetz 		 * User may have specified a large count value which caused the
3313fd875dcaSMike Kravetz 		 * above calculation to overflow.  In this case, they wanted
3314fd875dcaSMike Kravetz 		 * to allocate as many huge pages as possible.  Set count to
3315fd875dcaSMike Kravetz 		 * largest possible value to align with their intention.
3316fd875dcaSMike Kravetz 		 */
3317fd875dcaSMike Kravetz 		if (count < old_count)
3318fd875dcaSMike Kravetz 			count = ULONG_MAX;
3319fd875dcaSMike Kravetz 	}
3320fd875dcaSMike Kravetz 
3321fd875dcaSMike Kravetz 	/*
33224eb0716eSAlexandre Ghiti 	 * Gigantic pages runtime allocation depend on the capability for large
33234eb0716eSAlexandre Ghiti 	 * page range allocation.
33244eb0716eSAlexandre Ghiti 	 * If the system does not provide this feature, return an error when
33254eb0716eSAlexandre Ghiti 	 * the user tries to allocate gigantic pages but let the user free the
33264eb0716eSAlexandre Ghiti 	 * boottime allocated gigantic pages.
33274eb0716eSAlexandre Ghiti 	 */
33284eb0716eSAlexandre Ghiti 	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
33294eb0716eSAlexandre Ghiti 		if (count > persistent_huge_pages(h)) {
3330db71ef79SMike Kravetz 			spin_unlock_irq(&hugetlb_lock);
333129383967SMike Kravetz 			mutex_unlock(&h->resize_lock);
3332f60858f9SMike Kravetz 			NODEMASK_FREE(node_alloc_noretry);
33334eb0716eSAlexandre Ghiti 			return -EINVAL;
33344eb0716eSAlexandre Ghiti 		}
33354eb0716eSAlexandre Ghiti 		/* Fall through to decrease pool */
33364eb0716eSAlexandre Ghiti 	}
3337aa888a74SAndi Kleen 
33387893d1d5SAdam Litke 	/*
33397893d1d5SAdam Litke 	 * Increase the pool size
33407893d1d5SAdam Litke 	 * First take pages out of surplus state.  Then make up the
33417893d1d5SAdam Litke 	 * remaining difference by allocating fresh huge pages.
3342d1c3fb1fSNishanth Aravamudan 	 *
33430c397daeSMichal Hocko 	 * We might race with alloc_surplus_huge_page() here and be unable
3344d1c3fb1fSNishanth Aravamudan 	 * to convert a surplus huge page to a normal huge page. That is
3345d1c3fb1fSNishanth Aravamudan 	 * not critical, though, it just means the overall size of the
3346d1c3fb1fSNishanth Aravamudan 	 * pool might be one hugepage larger than it needs to be, but
3347d1c3fb1fSNishanth Aravamudan 	 * within all the constraints specified by the sysctls.
33487893d1d5SAdam Litke 	 */
3349a5516438SAndi Kleen 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
33506ae11b27SLee Schermerhorn 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
33517893d1d5SAdam Litke 			break;
33527893d1d5SAdam Litke 	}
33537893d1d5SAdam Litke 
3354a5516438SAndi Kleen 	while (count > persistent_huge_pages(h)) {
33557893d1d5SAdam Litke 		/*
33567893d1d5SAdam Litke 		 * If this allocation races such that we no longer need the
33577893d1d5SAdam Litke 		 * page, free_huge_page will handle it by freeing the page
33587893d1d5SAdam Litke 		 * and reducing the surplus.
33597893d1d5SAdam Litke 		 */
3360db71ef79SMike Kravetz 		spin_unlock_irq(&hugetlb_lock);
3361649920c6SJia He 
3362649920c6SJia He 		/* yield cpu to avoid soft lockup */
3363649920c6SJia He 		cond_resched();
3364649920c6SJia He 
3365f60858f9SMike Kravetz 		ret = alloc_pool_huge_page(h, nodes_allowed,
3366f60858f9SMike Kravetz 						node_alloc_noretry);
3367db71ef79SMike Kravetz 		spin_lock_irq(&hugetlb_lock);
33687893d1d5SAdam Litke 		if (!ret)
33697893d1d5SAdam Litke 			goto out;
33707893d1d5SAdam Litke 
3371536240f2SMel Gorman 		/* Bail for signals. Probably ctrl-c from user */
3372536240f2SMel Gorman 		if (signal_pending(current))
3373536240f2SMel Gorman 			goto out;
33747893d1d5SAdam Litke 	}
33757893d1d5SAdam Litke 
33767893d1d5SAdam Litke 	/*
33777893d1d5SAdam Litke 	 * Decrease the pool size
33787893d1d5SAdam Litke 	 * First return free pages to the buddy allocator (being careful
33797893d1d5SAdam Litke 	 * to keep enough around to satisfy reservations).  Then place
33807893d1d5SAdam Litke 	 * pages into surplus state as needed so the pool will shrink
33817893d1d5SAdam Litke 	 * to the desired size as pages become free.
3382d1c3fb1fSNishanth Aravamudan 	 *
3383d1c3fb1fSNishanth Aravamudan 	 * By placing pages into the surplus state independent of the
3384d1c3fb1fSNishanth Aravamudan 	 * overcommit value, we are allowing the surplus pool size to
3385d1c3fb1fSNishanth Aravamudan 	 * exceed overcommit. There are few sane options here. Since
33860c397daeSMichal Hocko 	 * alloc_surplus_huge_page() is checking the global counter,
3387d1c3fb1fSNishanth Aravamudan 	 * though, we'll note that we're not allowed to exceed surplus
3388d1c3fb1fSNishanth Aravamudan 	 * and won't grow the pool anywhere else. Not until one of the
3389d1c3fb1fSNishanth Aravamudan 	 * sysctls are changed, or the surplus pages go out of use.
33907893d1d5SAdam Litke 	 */
3391a5516438SAndi Kleen 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
33926b0c880dSAdam Litke 	min_count = max(count, min_count);
33936ae11b27SLee Schermerhorn 	try_to_free_low(h, min_count, nodes_allowed);
339410c6ec49SMike Kravetz 
339510c6ec49SMike Kravetz 	/*
339610c6ec49SMike Kravetz 	 * Collect pages to be removed on list without dropping lock
339710c6ec49SMike Kravetz 	 */
3398a5516438SAndi Kleen 	while (min_count < persistent_huge_pages(h)) {
339910c6ec49SMike Kravetz 		page = remove_pool_huge_page(h, nodes_allowed, 0);
340010c6ec49SMike Kravetz 		if (!page)
34011da177e4SLinus Torvalds 			break;
340210c6ec49SMike Kravetz 
340310c6ec49SMike Kravetz 		list_add(&page->lru, &page_list);
34041da177e4SLinus Torvalds 	}
340510c6ec49SMike Kravetz 	/* free the pages after dropping lock */
3406db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
340710c6ec49SMike Kravetz 	update_and_free_pages_bulk(h, &page_list);
3408b65d4adbSMuchun Song 	flush_free_hpage_work(h);
3409db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
341010c6ec49SMike Kravetz 
3411a5516438SAndi Kleen 	while (count < persistent_huge_pages(h)) {
34126ae11b27SLee Schermerhorn 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
34137893d1d5SAdam Litke 			break;
34147893d1d5SAdam Litke 	}
34157893d1d5SAdam Litke out:
34164eb0716eSAlexandre Ghiti 	h->max_huge_pages = persistent_huge_pages(h);
3417db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
341829383967SMike Kravetz 	mutex_unlock(&h->resize_lock);
34194eb0716eSAlexandre Ghiti 
3420f60858f9SMike Kravetz 	NODEMASK_FREE(node_alloc_noretry);
3421f60858f9SMike Kravetz 
34224eb0716eSAlexandre Ghiti 	return 0;
34231da177e4SLinus Torvalds }
34241da177e4SLinus Torvalds 
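/*
 * Demote one free huge page into pages of h->demote_order size.  Called with
 * hugetlb_lock held; the lock is dropped while the vmemmap is restored and
 * the smaller replacement pages are prepped, then reacquired before return.
 */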
34258531fc6fSMike Kravetz static int demote_free_huge_page(struct hstate *h, struct page *page)
34268531fc6fSMike Kravetz {
34278531fc6fSMike Kravetz 	int i, nid = page_to_nid(page);
34288531fc6fSMike Kravetz 	struct hstate *target_hstate;
342931731452SDoug Berger 	struct page *subpage;
34308531fc6fSMike Kravetz 	int rc = 0;
34318531fc6fSMike Kravetz 
34328531fc6fSMike Kravetz 	target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
34338531fc6fSMike Kravetz 
34348531fc6fSMike Kravetz 	remove_hugetlb_page_for_demote(h, page, false);
34358531fc6fSMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
34368531fc6fSMike Kravetz 
34376213834cSMuchun Song 	rc = hugetlb_vmemmap_restore(h, page);
34388531fc6fSMike Kravetz 	if (rc) {
34398531fc6fSMike Kravetz 		/* Allocation of vmemmap failed, we cannot demote the page */
34408531fc6fSMike Kravetz 		spin_lock_irq(&hugetlb_lock);
34418531fc6fSMike Kravetz 		set_page_refcounted(page);
34428531fc6fSMike Kravetz 		add_hugetlb_page(h, page, false);
34438531fc6fSMike Kravetz 		return rc;
34448531fc6fSMike Kravetz 	}
34458531fc6fSMike Kravetz 
34468531fc6fSMike Kravetz 	/*
34478531fc6fSMike Kravetz 	 * Use destroy_compound_hugetlb_page_for_demote for all huge page
34488531fc6fSMike Kravetz 	 * sizes as it will not ref count pages.
34498531fc6fSMike Kravetz 	 */
34508531fc6fSMike Kravetz 	destroy_compound_hugetlb_page_for_demote(page, huge_page_order(h));
34518531fc6fSMike Kravetz 
34528531fc6fSMike Kravetz 	/*
34538531fc6fSMike Kravetz 	 * Taking target hstate mutex synchronizes with set_max_huge_pages.
34548531fc6fSMike Kravetz 	 * Without the mutex, pages added to target hstate could be marked
34558531fc6fSMike Kravetz 	 * as surplus.
34568531fc6fSMike Kravetz 	 *
34578531fc6fSMike Kravetz 	 * Note that we already hold h->resize_lock.  To prevent deadlock,
34588531fc6fSMike Kravetz 	 * use the convention of always taking larger size hstate mutex first.
34598531fc6fSMike Kravetz 	 */
34608531fc6fSMike Kravetz 	mutex_lock(&target_hstate->resize_lock);
34618531fc6fSMike Kravetz 	for (i = 0; i < pages_per_huge_page(h);
34628531fc6fSMike Kravetz 				i += pages_per_huge_page(target_hstate)) {
346331731452SDoug Berger 		subpage = nth_page(page, i);
34648531fc6fSMike Kravetz 		if (hstate_is_gigantic(target_hstate))
346531731452SDoug Berger 			prep_compound_gigantic_page_for_demote(subpage,
34668531fc6fSMike Kravetz 							target_hstate->order);
34678531fc6fSMike Kravetz 		else
346831731452SDoug Berger 			prep_compound_page(subpage, target_hstate->order);
346931731452SDoug Berger 		set_page_private(subpage, 0);
347031731452SDoug Berger 		set_page_refcounted(subpage);
347131731452SDoug Berger 		prep_new_huge_page(target_hstate, subpage, nid);
347231731452SDoug Berger 		put_page(subpage);
34738531fc6fSMike Kravetz 	}
34748531fc6fSMike Kravetz 	mutex_unlock(&target_hstate->resize_lock);
34758531fc6fSMike Kravetz 
34768531fc6fSMike Kravetz 	spin_lock_irq(&hugetlb_lock);
34778531fc6fSMike Kravetz 
34788531fc6fSMike Kravetz 	/*
34798531fc6fSMike Kravetz 	 * Not absolutely necessary, but for consistency update max_huge_pages
34808531fc6fSMike Kravetz 	 * based on pool changes for the demoted page.
34818531fc6fSMike Kravetz 	 */
34828531fc6fSMike Kravetz 	h->max_huge_pages--;
3483a43a83c7SMiaohe Lin 	target_hstate->max_huge_pages +=
3484a43a83c7SMiaohe Lin 		pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
34858531fc6fSMike Kravetz 
34868531fc6fSMike Kravetz 	return rc;
34878531fc6fSMike Kravetz }
34888531fc6fSMike Kravetz 
348979dfc695SMike Kravetz static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
349079dfc695SMike Kravetz 	__must_hold(&hugetlb_lock)
349179dfc695SMike Kravetz {
34928531fc6fSMike Kravetz 	int nr_nodes, node;
34938531fc6fSMike Kravetz 	struct page *page;
349479dfc695SMike Kravetz 
349579dfc695SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
349679dfc695SMike Kravetz 
349779dfc695SMike Kravetz 	/* We should never get here if no demote order */
349879dfc695SMike Kravetz 	if (!h->demote_order) {
349979dfc695SMike Kravetz 		pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
350079dfc695SMike Kravetz 		return -EINVAL;		/* internal error */
350179dfc695SMike Kravetz 	}
350279dfc695SMike Kravetz 
35038531fc6fSMike Kravetz 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
35045a317412SMike Kravetz 		list_for_each_entry(page, &h->hugepage_freelists[node], lru) {
35055a317412SMike Kravetz 			if (PageHWPoison(page))
35065a317412SMike Kravetz 				continue;
35075a317412SMike Kravetz 
35085a317412SMike Kravetz 			return demote_free_huge_page(h, page);
35098531fc6fSMike Kravetz 		}
35108531fc6fSMike Kravetz 	}
35118531fc6fSMike Kravetz 
35125a317412SMike Kravetz 	/*
35135a317412SMike Kravetz 	 * Only way to get here is if all pages on free lists are poisoned.
35145a317412SMike Kravetz 	 * Return -EBUSY so that caller will not retry.
35155a317412SMike Kravetz 	 */
35165a317412SMike Kravetz 	return -EBUSY;
351779dfc695SMike Kravetz }
351879dfc695SMike Kravetz 
3519a3437870SNishanth Aravamudan #define HSTATE_ATTR_RO(_name) \
3520a3437870SNishanth Aravamudan 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3521a3437870SNishanth Aravamudan 
352279dfc695SMike Kravetz #define HSTATE_ATTR_WO(_name) \
352379dfc695SMike Kravetz 	static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
352479dfc695SMike Kravetz 
3525a3437870SNishanth Aravamudan #define HSTATE_ATTR(_name) \
352698bc26acSMiaohe Lin 	static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
3527a3437870SNishanth Aravamudan 
3528a3437870SNishanth Aravamudan static struct kobject *hugepages_kobj;
3529a3437870SNishanth Aravamudan static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
3530a3437870SNishanth Aravamudan 
35319a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
35329a305230SLee Schermerhorn 
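/*
 * Map a hugepages sysfs kobject back to its hstate.  For the global
 * attributes *nidp is set to NUMA_NO_NODE; per node kobjects are resolved
 * via kobj_to_node_hstate(), which also returns the node id.
 */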
35339a305230SLee Schermerhorn static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
3534a3437870SNishanth Aravamudan {
3535a3437870SNishanth Aravamudan 	int i;
35369a305230SLee Schermerhorn 
3537a3437870SNishanth Aravamudan 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
35389a305230SLee Schermerhorn 		if (hstate_kobjs[i] == kobj) {
35399a305230SLee Schermerhorn 			if (nidp)
35409a305230SLee Schermerhorn 				*nidp = NUMA_NO_NODE;
3541a3437870SNishanth Aravamudan 			return &hstates[i];
35429a305230SLee Schermerhorn 		}
35439a305230SLee Schermerhorn 
35449a305230SLee Schermerhorn 	return kobj_to_node_hstate(kobj, nidp);
3545a3437870SNishanth Aravamudan }
3546a3437870SNishanth Aravamudan 
354706808b08SLee Schermerhorn static ssize_t nr_hugepages_show_common(struct kobject *kobj,
3548a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
3549a3437870SNishanth Aravamudan {
35509a305230SLee Schermerhorn 	struct hstate *h;
35519a305230SLee Schermerhorn 	unsigned long nr_huge_pages;
35529a305230SLee Schermerhorn 	int nid;
35539a305230SLee Schermerhorn 
35549a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
35559a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
35569a305230SLee Schermerhorn 		nr_huge_pages = h->nr_huge_pages;
35579a305230SLee Schermerhorn 	else
35589a305230SLee Schermerhorn 		nr_huge_pages = h->nr_huge_pages_node[nid];
35599a305230SLee Schermerhorn 
3560ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", nr_huge_pages);
3561a3437870SNishanth Aravamudan }
3562adbe8726SEric B Munson 
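/*
 * Common handler for writes to nr_hugepages and nr_hugepages_mempolicy.
 * Builds the allowed node mask (mempolicy derived, node specific, or all
 * memory nodes) and hands the new target off to set_max_huge_pages().
 */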
3563238d3c13SDavid Rientjes static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
3564238d3c13SDavid Rientjes 					   struct hstate *h, int nid,
3565238d3c13SDavid Rientjes 					   unsigned long count, size_t len)
3566a3437870SNishanth Aravamudan {
3567a3437870SNishanth Aravamudan 	int err;
35682d0adf7eSOscar Salvador 	nodemask_t nodes_allowed, *n_mask;
3569a3437870SNishanth Aravamudan 
35702d0adf7eSOscar Salvador 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
35712d0adf7eSOscar Salvador 		return -EINVAL;
3572adbe8726SEric B Munson 
35739a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE) {
35749a305230SLee Schermerhorn 		/*
35759a305230SLee Schermerhorn 		 * global hstate attribute
35769a305230SLee Schermerhorn 		 */
35779a305230SLee Schermerhorn 		if (!(obey_mempolicy &&
35782d0adf7eSOscar Salvador 				init_nodemask_of_mempolicy(&nodes_allowed)))
35792d0adf7eSOscar Salvador 			n_mask = &node_states[N_MEMORY];
35802d0adf7eSOscar Salvador 		else
35812d0adf7eSOscar Salvador 			n_mask = &nodes_allowed;
35822d0adf7eSOscar Salvador 	} else {
35839a305230SLee Schermerhorn 		/*
3584fd875dcaSMike Kravetz 		 * Node specific request.  count adjustment happens in
3585fd875dcaSMike Kravetz 		 * set_max_huge_pages() after acquiring hugetlb_lock.
35869a305230SLee Schermerhorn 		 */
35872d0adf7eSOscar Salvador 		init_nodemask_of_node(&nodes_allowed, nid);
35882d0adf7eSOscar Salvador 		n_mask = &nodes_allowed;
3589fd875dcaSMike Kravetz 	}
35909a305230SLee Schermerhorn 
35912d0adf7eSOscar Salvador 	err = set_max_huge_pages(h, count, nid, n_mask);
359206808b08SLee Schermerhorn 
35934eb0716eSAlexandre Ghiti 	return err ? err : len;
359406808b08SLee Schermerhorn }
359506808b08SLee Schermerhorn 
3596238d3c13SDavid Rientjes static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
3597238d3c13SDavid Rientjes 					 struct kobject *kobj, const char *buf,
3598238d3c13SDavid Rientjes 					 size_t len)
3599238d3c13SDavid Rientjes {
3600238d3c13SDavid Rientjes 	struct hstate *h;
3601238d3c13SDavid Rientjes 	unsigned long count;
3602238d3c13SDavid Rientjes 	int nid;
3603238d3c13SDavid Rientjes 	int err;
3604238d3c13SDavid Rientjes 
3605238d3c13SDavid Rientjes 	err = kstrtoul(buf, 10, &count);
3606238d3c13SDavid Rientjes 	if (err)
3607238d3c13SDavid Rientjes 		return err;
3608238d3c13SDavid Rientjes 
3609238d3c13SDavid Rientjes 	h = kobj_to_hstate(kobj, &nid);
3610238d3c13SDavid Rientjes 	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
3611238d3c13SDavid Rientjes }
3612238d3c13SDavid Rientjes 
361306808b08SLee Schermerhorn static ssize_t nr_hugepages_show(struct kobject *kobj,
361406808b08SLee Schermerhorn 				       struct kobj_attribute *attr, char *buf)
361506808b08SLee Schermerhorn {
361606808b08SLee Schermerhorn 	return nr_hugepages_show_common(kobj, attr, buf);
361706808b08SLee Schermerhorn }
361806808b08SLee Schermerhorn 
361906808b08SLee Schermerhorn static ssize_t nr_hugepages_store(struct kobject *kobj,
362006808b08SLee Schermerhorn 	       struct kobj_attribute *attr, const char *buf, size_t len)
362106808b08SLee Schermerhorn {
3622238d3c13SDavid Rientjes 	return nr_hugepages_store_common(false, kobj, buf, len);
3623a3437870SNishanth Aravamudan }
3624a3437870SNishanth Aravamudan HSTATE_ATTR(nr_hugepages);
3625a3437870SNishanth Aravamudan 
362606808b08SLee Schermerhorn #ifdef CONFIG_NUMA
362706808b08SLee Schermerhorn 
362806808b08SLee Schermerhorn /*
362906808b08SLee Schermerhorn  * hstate attribute for optionally mempolicy-based constraint on persistent
363006808b08SLee Schermerhorn  * huge page alloc/free.
363106808b08SLee Schermerhorn  */
363206808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
3633ae7a927dSJoe Perches 					   struct kobj_attribute *attr,
3634ae7a927dSJoe Perches 					   char *buf)
363506808b08SLee Schermerhorn {
363606808b08SLee Schermerhorn 	return nr_hugepages_show_common(kobj, attr, buf);
363706808b08SLee Schermerhorn }
363806808b08SLee Schermerhorn 
363906808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
364006808b08SLee Schermerhorn 	       struct kobj_attribute *attr, const char *buf, size_t len)
364106808b08SLee Schermerhorn {
3642238d3c13SDavid Rientjes 	return nr_hugepages_store_common(true, kobj, buf, len);
364306808b08SLee Schermerhorn }
364406808b08SLee Schermerhorn HSTATE_ATTR(nr_hugepages_mempolicy);
364506808b08SLee Schermerhorn #endif
364606808b08SLee Schermerhorn 
364706808b08SLee Schermerhorn 
3648a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
3649a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
3650a3437870SNishanth Aravamudan {
36519a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3652ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
3653a3437870SNishanth Aravamudan }
3654adbe8726SEric B Munson 
3655a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
3656a3437870SNishanth Aravamudan 		struct kobj_attribute *attr, const char *buf, size_t count)
3657a3437870SNishanth Aravamudan {
3658a3437870SNishanth Aravamudan 	int err;
3659a3437870SNishanth Aravamudan 	unsigned long input;
36609a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3661a3437870SNishanth Aravamudan 
3662bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
3663adbe8726SEric B Munson 		return -EINVAL;
3664adbe8726SEric B Munson 
36653dbb95f7SJingoo Han 	err = kstrtoul(buf, 10, &input);
3666a3437870SNishanth Aravamudan 	if (err)
366773ae31e5SEric B Munson 		return err;
3668a3437870SNishanth Aravamudan 
3669db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
3670a3437870SNishanth Aravamudan 	h->nr_overcommit_huge_pages = input;
3671db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
3672a3437870SNishanth Aravamudan 
3673a3437870SNishanth Aravamudan 	return count;
3674a3437870SNishanth Aravamudan }
3675a3437870SNishanth Aravamudan HSTATE_ATTR(nr_overcommit_hugepages);
3676a3437870SNishanth Aravamudan 
3677a3437870SNishanth Aravamudan static ssize_t free_hugepages_show(struct kobject *kobj,
3678a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
3679a3437870SNishanth Aravamudan {
36809a305230SLee Schermerhorn 	struct hstate *h;
36819a305230SLee Schermerhorn 	unsigned long free_huge_pages;
36829a305230SLee Schermerhorn 	int nid;
36839a305230SLee Schermerhorn 
36849a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
36859a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
36869a305230SLee Schermerhorn 		free_huge_pages = h->free_huge_pages;
36879a305230SLee Schermerhorn 	else
36889a305230SLee Schermerhorn 		free_huge_pages = h->free_huge_pages_node[nid];
36899a305230SLee Schermerhorn 
3690ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", free_huge_pages);
3691a3437870SNishanth Aravamudan }
3692a3437870SNishanth Aravamudan HSTATE_ATTR_RO(free_hugepages);
3693a3437870SNishanth Aravamudan 
3694a3437870SNishanth Aravamudan static ssize_t resv_hugepages_show(struct kobject *kobj,
3695a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
3696a3437870SNishanth Aravamudan {
36979a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3698ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
3699a3437870SNishanth Aravamudan }
3700a3437870SNishanth Aravamudan HSTATE_ATTR_RO(resv_hugepages);
3701a3437870SNishanth Aravamudan 
3702a3437870SNishanth Aravamudan static ssize_t surplus_hugepages_show(struct kobject *kobj,
3703a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
3704a3437870SNishanth Aravamudan {
37059a305230SLee Schermerhorn 	struct hstate *h;
37069a305230SLee Schermerhorn 	unsigned long surplus_huge_pages;
37079a305230SLee Schermerhorn 	int nid;
37089a305230SLee Schermerhorn 
37099a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
37109a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
37119a305230SLee Schermerhorn 		surplus_huge_pages = h->surplus_huge_pages;
37129a305230SLee Schermerhorn 	else
37139a305230SLee Schermerhorn 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
37149a305230SLee Schermerhorn 
3715ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
3716a3437870SNishanth Aravamudan }
3717a3437870SNishanth Aravamudan HSTATE_ATTR_RO(surplus_hugepages);
3718a3437870SNishanth Aravamudan 
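/*
 * Handle writes to the 'demote' attribute: demote up to the requested
 * number of free huge pages, restricted to a single node for the per node
 * attribute, stopping early if no unreserved free pages remain or a
 * demotion attempt fails.
 */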
371979dfc695SMike Kravetz static ssize_t demote_store(struct kobject *kobj,
372079dfc695SMike Kravetz 	       struct kobj_attribute *attr, const char *buf, size_t len)
372179dfc695SMike Kravetz {
372279dfc695SMike Kravetz 	unsigned long nr_demote;
372379dfc695SMike Kravetz 	unsigned long nr_available;
372479dfc695SMike Kravetz 	nodemask_t nodes_allowed, *n_mask;
372579dfc695SMike Kravetz 	struct hstate *h;
37268eeda55fSLi zeming 	int err;
372779dfc695SMike Kravetz 	int nid;
372879dfc695SMike Kravetz 
372979dfc695SMike Kravetz 	err = kstrtoul(buf, 10, &nr_demote);
373079dfc695SMike Kravetz 	if (err)
373179dfc695SMike Kravetz 		return err;
373279dfc695SMike Kravetz 	h = kobj_to_hstate(kobj, &nid);
373379dfc695SMike Kravetz 
373479dfc695SMike Kravetz 	if (nid != NUMA_NO_NODE) {
373579dfc695SMike Kravetz 		init_nodemask_of_node(&nodes_allowed, nid);
373679dfc695SMike Kravetz 		n_mask = &nodes_allowed;
373779dfc695SMike Kravetz 	} else {
373879dfc695SMike Kravetz 		n_mask = &node_states[N_MEMORY];
373979dfc695SMike Kravetz 	}
374079dfc695SMike Kravetz 
374179dfc695SMike Kravetz 	/* Synchronize with other sysfs operations modifying huge pages */
374279dfc695SMike Kravetz 	mutex_lock(&h->resize_lock);
374379dfc695SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
374479dfc695SMike Kravetz 
374579dfc695SMike Kravetz 	while (nr_demote) {
374679dfc695SMike Kravetz 		/*
374779dfc695SMike Kravetz 		 * Check for available pages to demote each time through the
374879dfc695SMike Kravetz 		 * loop as demote_pool_huge_page will drop hugetlb_lock.
374979dfc695SMike Kravetz 		 */
375079dfc695SMike Kravetz 		if (nid != NUMA_NO_NODE)
375179dfc695SMike Kravetz 			nr_available = h->free_huge_pages_node[nid];
375279dfc695SMike Kravetz 		else
375379dfc695SMike Kravetz 			nr_available = h->free_huge_pages;
375479dfc695SMike Kravetz 		nr_available -= h->resv_huge_pages;
375579dfc695SMike Kravetz 		if (!nr_available)
375679dfc695SMike Kravetz 			break;
375779dfc695SMike Kravetz 
375879dfc695SMike Kravetz 		err = demote_pool_huge_page(h, n_mask);
375979dfc695SMike Kravetz 		if (err)
376079dfc695SMike Kravetz 			break;
376179dfc695SMike Kravetz 
376279dfc695SMike Kravetz 		nr_demote--;
376379dfc695SMike Kravetz 	}
376479dfc695SMike Kravetz 
376579dfc695SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
376679dfc695SMike Kravetz 	mutex_unlock(&h->resize_lock);
376779dfc695SMike Kravetz 
376879dfc695SMike Kravetz 	if (err)
376979dfc695SMike Kravetz 		return err;
377079dfc695SMike Kravetz 	return len;
377179dfc695SMike Kravetz }
377279dfc695SMike Kravetz HSTATE_ATTR_WO(demote);
377379dfc695SMike Kravetz 
377479dfc695SMike Kravetz static ssize_t demote_size_show(struct kobject *kobj,
377579dfc695SMike Kravetz 					struct kobj_attribute *attr, char *buf)
377679dfc695SMike Kravetz {
377712658abfSMiaohe Lin 	struct hstate *h = kobj_to_hstate(kobj, NULL);
377879dfc695SMike Kravetz 	unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
377979dfc695SMike Kravetz 
378079dfc695SMike Kravetz 	return sysfs_emit(buf, "%lukB\n", demote_size);
378179dfc695SMike Kravetz }
378279dfc695SMike Kravetz 
378379dfc695SMike Kravetz static ssize_t demote_size_store(struct kobject *kobj,
378479dfc695SMike Kravetz 					struct kobj_attribute *attr,
378579dfc695SMike Kravetz 					const char *buf, size_t count)
378679dfc695SMike Kravetz {
378779dfc695SMike Kravetz 	struct hstate *h, *demote_hstate;
378879dfc695SMike Kravetz 	unsigned long demote_size;
378979dfc695SMike Kravetz 	unsigned int demote_order;
379079dfc695SMike Kravetz 
379179dfc695SMike Kravetz 	demote_size = (unsigned long)memparse(buf, NULL);
379279dfc695SMike Kravetz 
379379dfc695SMike Kravetz 	demote_hstate = size_to_hstate(demote_size);
379479dfc695SMike Kravetz 	if (!demote_hstate)
379579dfc695SMike Kravetz 		return -EINVAL;
379679dfc695SMike Kravetz 	demote_order = demote_hstate->order;
3797a01f4390SMike Kravetz 	if (demote_order < HUGETLB_PAGE_ORDER)
3798a01f4390SMike Kravetz 		return -EINVAL;
379979dfc695SMike Kravetz 
380079dfc695SMike Kravetz 	/* demote order must be smaller than hstate order */
380112658abfSMiaohe Lin 	h = kobj_to_hstate(kobj, NULL);
380279dfc695SMike Kravetz 	if (demote_order >= h->order)
380379dfc695SMike Kravetz 		return -EINVAL;
380479dfc695SMike Kravetz 
380579dfc695SMike Kravetz 	/* resize_lock synchronizes access to demote size and writes */
380679dfc695SMike Kravetz 	mutex_lock(&h->resize_lock);
380779dfc695SMike Kravetz 	h->demote_order = demote_order;
380879dfc695SMike Kravetz 	mutex_unlock(&h->resize_lock);
380979dfc695SMike Kravetz 
381079dfc695SMike Kravetz 	return count;
381179dfc695SMike Kravetz }
381279dfc695SMike Kravetz HSTATE_ATTR(demote_size);
381379dfc695SMike Kravetz 
3814a3437870SNishanth Aravamudan static struct attribute *hstate_attrs[] = {
3815a3437870SNishanth Aravamudan 	&nr_hugepages_attr.attr,
3816a3437870SNishanth Aravamudan 	&nr_overcommit_hugepages_attr.attr,
3817a3437870SNishanth Aravamudan 	&free_hugepages_attr.attr,
3818a3437870SNishanth Aravamudan 	&resv_hugepages_attr.attr,
3819a3437870SNishanth Aravamudan 	&surplus_hugepages_attr.attr,
382006808b08SLee Schermerhorn #ifdef CONFIG_NUMA
382106808b08SLee Schermerhorn 	&nr_hugepages_mempolicy_attr.attr,
382206808b08SLee Schermerhorn #endif
3823a3437870SNishanth Aravamudan 	NULL,
3824a3437870SNishanth Aravamudan };
3825a3437870SNishanth Aravamudan 
382667e5ed96SArvind Yadav static const struct attribute_group hstate_attr_group = {
3827a3437870SNishanth Aravamudan 	.attrs = hstate_attrs,
3828a3437870SNishanth Aravamudan };
3829a3437870SNishanth Aravamudan 
383079dfc695SMike Kravetz static struct attribute *hstate_demote_attrs[] = {
383179dfc695SMike Kravetz 	&demote_size_attr.attr,
383279dfc695SMike Kravetz 	&demote_attr.attr,
383379dfc695SMike Kravetz 	NULL,
383479dfc695SMike Kravetz };
383579dfc695SMike Kravetz 
383679dfc695SMike Kravetz static const struct attribute_group hstate_demote_attr_group = {
383779dfc695SMike Kravetz 	.attrs = hstate_demote_attrs,
383879dfc695SMike Kravetz };
383979dfc695SMike Kravetz 
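/*
 * Create the sysfs kobject for one hstate under 'parent' and populate it
 * with the given attribute group; the demote attributes are added as well
 * when the hstate has a non-zero demote order.
 */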
3840094e9539SJeff Mahoney static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
38419a305230SLee Schermerhorn 				    struct kobject **hstate_kobjs,
384267e5ed96SArvind Yadav 				    const struct attribute_group *hstate_attr_group)
3843a3437870SNishanth Aravamudan {
3844a3437870SNishanth Aravamudan 	int retval;
3845972dc4deSAneesh Kumar K.V 	int hi = hstate_index(h);
3846a3437870SNishanth Aravamudan 
38479a305230SLee Schermerhorn 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
38489a305230SLee Schermerhorn 	if (!hstate_kobjs[hi])
3849a3437870SNishanth Aravamudan 		return -ENOMEM;
3850a3437870SNishanth Aravamudan 
38519a305230SLee Schermerhorn 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
3852cc2205a6SMiaohe Lin 	if (retval) {
38539a305230SLee Schermerhorn 		kobject_put(hstate_kobjs[hi]);
3854cc2205a6SMiaohe Lin 		hstate_kobjs[hi] = NULL;
38553a6bdda0SMiaohe Lin 		return retval;
3856cc2205a6SMiaohe Lin 	}
3857a3437870SNishanth Aravamudan 
385879dfc695SMike Kravetz 	if (h->demote_order) {
385901088a60SMiaohe Lin 		retval = sysfs_create_group(hstate_kobjs[hi],
386001088a60SMiaohe Lin 					    &hstate_demote_attr_group);
386101088a60SMiaohe Lin 		if (retval) {
386279dfc695SMike Kravetz 			pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
386301088a60SMiaohe Lin 			sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group);
386401088a60SMiaohe Lin 			kobject_put(hstate_kobjs[hi]);
386501088a60SMiaohe Lin 			hstate_kobjs[hi] = NULL;
386601088a60SMiaohe Lin 			return retval;
386701088a60SMiaohe Lin 		}
386879dfc695SMike Kravetz 	}
386979dfc695SMike Kravetz 
387001088a60SMiaohe Lin 	return 0;
3871a3437870SNishanth Aravamudan }
3872a3437870SNishanth Aravamudan 
3873a3437870SNishanth Aravamudan static void __init hugetlb_sysfs_init(void)
3874a3437870SNishanth Aravamudan {
3875a3437870SNishanth Aravamudan 	struct hstate *h;
3876a3437870SNishanth Aravamudan 	int err;
3877a3437870SNishanth Aravamudan 
3878a3437870SNishanth Aravamudan 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
3879a3437870SNishanth Aravamudan 	if (!hugepages_kobj)
3880a3437870SNishanth Aravamudan 		return;
3881a3437870SNishanth Aravamudan 
3882a3437870SNishanth Aravamudan 	for_each_hstate(h) {
38839a305230SLee Schermerhorn 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
38849a305230SLee Schermerhorn 					 hstate_kobjs, &hstate_attr_group);
3885a3437870SNishanth Aravamudan 		if (err)
3886282f4214SMike Kravetz 			pr_err("HugeTLB: Unable to add hstate %s\n", h->name);
3887a3437870SNishanth Aravamudan 	}
3888a3437870SNishanth Aravamudan }
3889a3437870SNishanth Aravamudan 
38909a305230SLee Schermerhorn #ifdef CONFIG_NUMA
38919a305230SLee Schermerhorn 
38929a305230SLee Schermerhorn /*
38939a305230SLee Schermerhorn  * node_hstate/s - associate per node hstate attributes, via their kobjects,
389410fbcf4cSKay Sievers  * with node devices in node_devices[] using a parallel array.  The array
389510fbcf4cSKay Sievers  * index of a node device or _hstate == node id.
389610fbcf4cSKay Sievers  * This is here to avoid any static dependency of the node device driver, in
38979a305230SLee Schermerhorn  * the base kernel, on the hugetlb module.
38989a305230SLee Schermerhorn  */
38999a305230SLee Schermerhorn struct node_hstate {
39009a305230SLee Schermerhorn 	struct kobject		*hugepages_kobj;
39019a305230SLee Schermerhorn 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
39029a305230SLee Schermerhorn };
3903b4e289a6SAlexander Kuleshov static struct node_hstate node_hstates[MAX_NUMNODES];
39049a305230SLee Schermerhorn 
39059a305230SLee Schermerhorn /*
390610fbcf4cSKay Sievers  * A subset of global hstate attributes for node devices
39079a305230SLee Schermerhorn  */
39089a305230SLee Schermerhorn static struct attribute *per_node_hstate_attrs[] = {
39099a305230SLee Schermerhorn 	&nr_hugepages_attr.attr,
39109a305230SLee Schermerhorn 	&free_hugepages_attr.attr,
39119a305230SLee Schermerhorn 	&surplus_hugepages_attr.attr,
39129a305230SLee Schermerhorn 	NULL,
39139a305230SLee Schermerhorn };
39149a305230SLee Schermerhorn 
391567e5ed96SArvind Yadav static const struct attribute_group per_node_hstate_attr_group = {
39169a305230SLee Schermerhorn 	.attrs = per_node_hstate_attrs,
39179a305230SLee Schermerhorn };
39189a305230SLee Schermerhorn 
39199a305230SLee Schermerhorn /*
392010fbcf4cSKay Sievers  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
39219a305230SLee Schermerhorn  * Returns node id via non-NULL nidp.
39229a305230SLee Schermerhorn  */
39239a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
39249a305230SLee Schermerhorn {
39259a305230SLee Schermerhorn 	int nid;
39269a305230SLee Schermerhorn 
39279a305230SLee Schermerhorn 	for (nid = 0; nid < nr_node_ids; nid++) {
39289a305230SLee Schermerhorn 		struct node_hstate *nhs = &node_hstates[nid];
39299a305230SLee Schermerhorn 		int i;
39309a305230SLee Schermerhorn 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
39319a305230SLee Schermerhorn 			if (nhs->hstate_kobjs[i] == kobj) {
39329a305230SLee Schermerhorn 				if (nidp)
39339a305230SLee Schermerhorn 					*nidp = nid;
39349a305230SLee Schermerhorn 				return &hstates[i];
39359a305230SLee Schermerhorn 			}
39369a305230SLee Schermerhorn 	}
39379a305230SLee Schermerhorn 
39389a305230SLee Schermerhorn 	BUG();
39399a305230SLee Schermerhorn 	return NULL;
39409a305230SLee Schermerhorn }
39419a305230SLee Schermerhorn 
39429a305230SLee Schermerhorn /*
394310fbcf4cSKay Sievers  * Unregister hstate attributes from a single node device.
39449a305230SLee Schermerhorn  * No-op if no hstate attributes attached.
39459a305230SLee Schermerhorn  */
39463cd8b44fSClaudiu Ghioc static void hugetlb_unregister_node(struct node *node)
39479a305230SLee Schermerhorn {
39489a305230SLee Schermerhorn 	struct hstate *h;
394910fbcf4cSKay Sievers 	struct node_hstate *nhs = &node_hstates[node->dev.id];
39509a305230SLee Schermerhorn 
39519a305230SLee Schermerhorn 	if (!nhs->hugepages_kobj)
39529b5e5d0fSLee Schermerhorn 		return;		/* no hstate attributes */
39539a305230SLee Schermerhorn 
3954972dc4deSAneesh Kumar K.V 	for_each_hstate(h) {
3955972dc4deSAneesh Kumar K.V 		int idx = hstate_index(h);
395601088a60SMiaohe Lin 		struct kobject *hstate_kobj = nhs->hstate_kobjs[idx];
395701088a60SMiaohe Lin 
395801088a60SMiaohe Lin 		if (!hstate_kobj)
395901088a60SMiaohe Lin 			continue;
396001088a60SMiaohe Lin 		if (h->demote_order)
396101088a60SMiaohe Lin 			sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group);
396201088a60SMiaohe Lin 		sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group);
396301088a60SMiaohe Lin 		kobject_put(hstate_kobj);
3964972dc4deSAneesh Kumar K.V 		nhs->hstate_kobjs[idx] = NULL;
3965972dc4deSAneesh Kumar K.V 	}
39669a305230SLee Schermerhorn 
39679a305230SLee Schermerhorn 	kobject_put(nhs->hugepages_kobj);
39689a305230SLee Schermerhorn 	nhs->hugepages_kobj = NULL;
39699a305230SLee Schermerhorn }
39709a305230SLee Schermerhorn 
39719a305230SLee Schermerhorn 
39729a305230SLee Schermerhorn /*
397310fbcf4cSKay Sievers  * Register hstate attributes for a single node device.
39749a305230SLee Schermerhorn  * No-op if attributes already registered.
39759a305230SLee Schermerhorn  */
39763cd8b44fSClaudiu Ghioc static void hugetlb_register_node(struct node *node)
39779a305230SLee Schermerhorn {
39789a305230SLee Schermerhorn 	struct hstate *h;
397910fbcf4cSKay Sievers 	struct node_hstate *nhs = &node_hstates[node->dev.id];
39809a305230SLee Schermerhorn 	int err;
39819a305230SLee Schermerhorn 
39829a305230SLee Schermerhorn 	if (nhs->hugepages_kobj)
39839a305230SLee Schermerhorn 		return;		/* already allocated */
39849a305230SLee Schermerhorn 
39859a305230SLee Schermerhorn 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
398610fbcf4cSKay Sievers 							&node->dev.kobj);
39879a305230SLee Schermerhorn 	if (!nhs->hugepages_kobj)
39889a305230SLee Schermerhorn 		return;
39899a305230SLee Schermerhorn 
39909a305230SLee Schermerhorn 	for_each_hstate(h) {
39919a305230SLee Schermerhorn 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
39929a305230SLee Schermerhorn 						nhs->hstate_kobjs,
39939a305230SLee Schermerhorn 						&per_node_hstate_attr_group);
39949a305230SLee Schermerhorn 		if (err) {
3995282f4214SMike Kravetz 			pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
399610fbcf4cSKay Sievers 				h->name, node->dev.id);
39979a305230SLee Schermerhorn 			hugetlb_unregister_node(node);
39989a305230SLee Schermerhorn 			break;
39999a305230SLee Schermerhorn 		}
40009a305230SLee Schermerhorn 	}
40019a305230SLee Schermerhorn }
40029a305230SLee Schermerhorn 
40039a305230SLee Schermerhorn /*
40049b5e5d0fSLee Schermerhorn  * hugetlb init time:  register hstate attributes for all registered node
400510fbcf4cSKay Sievers  * devices of nodes that have memory.  All on-line nodes should have
400610fbcf4cSKay Sievers  * registered their associated device by this time.
40079a305230SLee Schermerhorn  */
40087d9ca000SLuiz Capitulino static void __init hugetlb_register_all_nodes(void)
40099a305230SLee Schermerhorn {
40109a305230SLee Schermerhorn 	int nid;
40119a305230SLee Schermerhorn 
40128cebfcd0SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
40138732794bSWen Congyang 		struct node *node = node_devices[nid];
401410fbcf4cSKay Sievers 		if (node->dev.id == nid)
40159a305230SLee Schermerhorn 			hugetlb_register_node(node);
40169a305230SLee Schermerhorn 	}
40179a305230SLee Schermerhorn 
40189a305230SLee Schermerhorn 	/*
401910fbcf4cSKay Sievers 	 * Let the node device driver know we're here so it can
40209a305230SLee Schermerhorn 	 * [un]register hstate attributes on node hotplug.
40219a305230SLee Schermerhorn 	 */
40229a305230SLee Schermerhorn 	register_hugetlbfs_with_node(hugetlb_register_node,
40239a305230SLee Schermerhorn 				     hugetlb_unregister_node);
40249a305230SLee Schermerhorn }
40259a305230SLee Schermerhorn #else	/* !CONFIG_NUMA */
40269a305230SLee Schermerhorn 
40279a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
40289a305230SLee Schermerhorn {
40299a305230SLee Schermerhorn 	BUG();
40309a305230SLee Schermerhorn 	if (nidp)
40319a305230SLee Schermerhorn 		*nidp = -1;
40329a305230SLee Schermerhorn 	return NULL;
40339a305230SLee Schermerhorn }
40349a305230SLee Schermerhorn 
40359a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) { }
40369a305230SLee Schermerhorn 
40379a305230SLee Schermerhorn #endif
40389a305230SLee Schermerhorn 
4039263b8998SMiaohe Lin #ifdef CONFIG_CMA
4040263b8998SMiaohe Lin static void __init hugetlb_cma_check(void);
4041263b8998SMiaohe Lin #else
4042263b8998SMiaohe Lin static inline __init void hugetlb_cma_check(void)
4043263b8998SMiaohe Lin {
4044263b8998SMiaohe Lin }
4045263b8998SMiaohe Lin #endif
4046263b8998SMiaohe Lin 
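/*
 * Main hugetlb initialization: ensure the HPAGE_SIZE hstate exists, apply
 * any default huge page counts from the command line, allocate boot time
 * pools, and register the sysfs, node and cgroup interfaces along with the
 * fault mutex table.
 */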
4047a3437870SNishanth Aravamudan static int __init hugetlb_init(void)
4048a3437870SNishanth Aravamudan {
40498382d914SDavidlohr Bueso 	int i;
40508382d914SDavidlohr Bueso 
4051d6995da3SMike Kravetz 	BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4052d6995da3SMike Kravetz 			__NR_HPAGEFLAGS);
4053d6995da3SMike Kravetz 
4054c2833a5bSMike Kravetz 	if (!hugepages_supported()) {
4055c2833a5bSMike Kravetz 		if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4056c2833a5bSMike Kravetz 			pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
40570ef89d25SBenjamin Herrenschmidt 		return 0;
4058d715cf80SLiam R. Howlett 	}
4059d715cf80SLiam R. Howlett 
4060282f4214SMike Kravetz 	/*
4061282f4214SMike Kravetz 	 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
4062282f4214SMike Kravetz 	 * architectures depend on setup being done here.
4063282f4214SMike Kravetz 	 */
4064a3437870SNishanth Aravamudan 	hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4065282f4214SMike Kravetz 	if (!parsed_default_hugepagesz) {
4066282f4214SMike Kravetz 		/*
4067282f4214SMike Kravetz 		 * If we did not parse a default huge page size, set
4068282f4214SMike Kravetz 		 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4069282f4214SMike Kravetz 		 * number of huge pages for this default size was implicitly
4070282f4214SMike Kravetz 		 * specified, set that here as well.
4071282f4214SMike Kravetz 		 * Note that the implicit setting will overwrite an explicit
4072282f4214SMike Kravetz 		 * setting.  A warning will be printed in this case.
4073282f4214SMike Kravetz 		 */
4074282f4214SMike Kravetz 		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4075f8b74815SVaishali Thakkar 		if (default_hstate_max_huge_pages) {
4076282f4214SMike Kravetz 			if (default_hstate.max_huge_pages) {
4077282f4214SMike Kravetz 				char buf[32];
4078282f4214SMike Kravetz 
4079282f4214SMike Kravetz 				string_get_size(huge_page_size(&default_hstate),
4080282f4214SMike Kravetz 					1, STRING_UNITS_2, buf, 32);
4081282f4214SMike Kravetz 				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4082282f4214SMike Kravetz 					default_hstate.max_huge_pages, buf);
4083282f4214SMike Kravetz 				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4084282f4214SMike Kravetz 					default_hstate_max_huge_pages);
4085282f4214SMike Kravetz 			}
4086282f4214SMike Kravetz 			default_hstate.max_huge_pages =
4087282f4214SMike Kravetz 				default_hstate_max_huge_pages;
4088b5389086SZhenguo Yao 
40890a7a0f6fSPeng Liu 			for_each_online_node(i)
4090b5389086SZhenguo Yao 				default_hstate.max_huge_pages_node[i] =
4091b5389086SZhenguo Yao 					default_hugepages_in_node[i];
4092282f4214SMike Kravetz 		}
4093f8b74815SVaishali Thakkar 	}
4094a3437870SNishanth Aravamudan 
4095cf11e85fSRoman Gushchin 	hugetlb_cma_check();
4096a3437870SNishanth Aravamudan 	hugetlb_init_hstates();
4097aa888a74SAndi Kleen 	gather_bootmem_prealloc();
4098a3437870SNishanth Aravamudan 	report_hugepages();
4099a3437870SNishanth Aravamudan 
4100a3437870SNishanth Aravamudan 	hugetlb_sysfs_init();
41019a305230SLee Schermerhorn 	hugetlb_register_all_nodes();
41027179e7bfSJianguo Wu 	hugetlb_cgroup_file_init();
41039a305230SLee Schermerhorn 
41048382d914SDavidlohr Bueso #ifdef CONFIG_SMP
41058382d914SDavidlohr Bueso 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
41068382d914SDavidlohr Bueso #else
41078382d914SDavidlohr Bueso 	num_fault_mutexes = 1;
41088382d914SDavidlohr Bueso #endif
4109c672c7f2SMike Kravetz 	hugetlb_fault_mutex_table =
41106da2ec56SKees Cook 		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
41116da2ec56SKees Cook 			      GFP_KERNEL);
4112c672c7f2SMike Kravetz 	BUG_ON(!hugetlb_fault_mutex_table);
41138382d914SDavidlohr Bueso 
41148382d914SDavidlohr Bueso 	for (i = 0; i < num_fault_mutexes; i++)
4115c672c7f2SMike Kravetz 		mutex_init(&hugetlb_fault_mutex_table[i]);
4116a3437870SNishanth Aravamudan 	return 0;
4117a3437870SNishanth Aravamudan }
41183e89e1c5SPaul Gortmaker subsys_initcall(hugetlb_init);
4119a3437870SNishanth Aravamudan 
4120ae94da89SMike Kravetz /* Overwritten by architectures with more huge page sizes */
4121ae94da89SMike Kravetz bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
41229fee021dSVaishali Thakkar {
4123ae94da89SMike Kravetz 	return size == HPAGE_SIZE;
41249fee021dSVaishali Thakkar }
41259fee021dSVaishali Thakkar 
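/*
 * Register an hstate for huge pages of size PAGE_SIZE << order, unless one
 * already exists.  The new hstate becomes parsed_hstate so that subsequent
 * "hugepages=" command line values apply to it.
 */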
4126d00181b9SKirill A. Shutemov void __init hugetlb_add_hstate(unsigned int order)
4127a3437870SNishanth Aravamudan {
4128a3437870SNishanth Aravamudan 	struct hstate *h;
41298faa8b07SAndi Kleen 	unsigned long i;
41308faa8b07SAndi Kleen 
4131a3437870SNishanth Aravamudan 	if (size_to_hstate(PAGE_SIZE << order)) {
4132a3437870SNishanth Aravamudan 		return;
4133a3437870SNishanth Aravamudan 	}
413447d38344SAneesh Kumar K.V 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4135a3437870SNishanth Aravamudan 	BUG_ON(order == 0);
413647d38344SAneesh Kumar K.V 	h = &hstates[hugetlb_max_hstate++];
413729383967SMike Kravetz 	mutex_init(&h->resize_lock);
4138a3437870SNishanth Aravamudan 	h->order = order;
4139aca78307SMiaohe Lin 	h->mask = ~(huge_page_size(h) - 1);
41408faa8b07SAndi Kleen 	for (i = 0; i < MAX_NUMNODES; ++i)
41418faa8b07SAndi Kleen 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
41420edaecfaSAneesh Kumar K.V 	INIT_LIST_HEAD(&h->hugepage_activelist);
414354f18d35SAndrew Morton 	h->next_nid_to_alloc = first_memory_node;
414454f18d35SAndrew Morton 	h->next_nid_to_free = first_memory_node;
4145a3437870SNishanth Aravamudan 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4146c2c3a60aSMiaohe Lin 					huge_page_size(h)/SZ_1K);
41478faa8b07SAndi Kleen 
4148a3437870SNishanth Aravamudan 	parsed_hstate = h;
4149a3437870SNishanth Aravamudan }
4150a3437870SNishanth Aravamudan 
4151b5389086SZhenguo Yao bool __init __weak hugetlb_node_alloc_supported(void)
4152b5389086SZhenguo Yao {
4153b5389086SZhenguo Yao 	return true;
4154b5389086SZhenguo Yao }
4155f87442f4SPeng Liu 
4156f87442f4SPeng Liu static void __init hugepages_clear_pages_in_node(void)
4157f87442f4SPeng Liu {
4158f87442f4SPeng Liu 	if (!hugetlb_max_hstate) {
4159f87442f4SPeng Liu 		default_hstate_max_huge_pages = 0;
4160f87442f4SPeng Liu 		memset(default_hugepages_in_node, 0,
416110395680SMiaohe Lin 			sizeof(default_hugepages_in_node));
4162f87442f4SPeng Liu 	} else {
4163f87442f4SPeng Liu 		parsed_hstate->max_huge_pages = 0;
4164f87442f4SPeng Liu 		memset(parsed_hstate->max_huge_pages_node, 0,
416510395680SMiaohe Lin 			sizeof(parsed_hstate->max_huge_pages_node));
4166f87442f4SPeng Liu 	}
4167f87442f4SPeng Liu }
4168f87442f4SPeng Liu 
4169282f4214SMike Kravetz /*
4170282f4214SMike Kravetz  * hugepages command line processing
4171282f4214SMike Kravetz  * hugepages normally follows a valid hugepagesz or default_hugepagesz
4172282f4214SMike Kravetz  * specification.  If not, ignore the hugepages value.  hugepages can also
4173282f4214SMike Kravetz  * be the first huge page command line option, in which case it implicitly
4174282f4214SMike Kravetz  * specifies the number of huge pages for the default size.
4175282f4214SMike Kravetz  */
4176282f4214SMike Kravetz static int __init hugepages_setup(char *s)
4177a3437870SNishanth Aravamudan {
4178a3437870SNishanth Aravamudan 	unsigned long *mhp;
41798faa8b07SAndi Kleen 	static unsigned long *last_mhp;
4180b5389086SZhenguo Yao 	int node = NUMA_NO_NODE;
4181b5389086SZhenguo Yao 	int count;
4182b5389086SZhenguo Yao 	unsigned long tmp;
4183b5389086SZhenguo Yao 	char *p = s;
4184a3437870SNishanth Aravamudan 
41859fee021dSVaishali Thakkar 	if (!parsed_valid_hugepagesz) {
4186282f4214SMike Kravetz 		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
41879fee021dSVaishali Thakkar 		parsed_valid_hugepagesz = true;
4188f81f6e4bSPeng Liu 		return 1;
41899fee021dSVaishali Thakkar 	}
4190282f4214SMike Kravetz 
4191a3437870SNishanth Aravamudan 	/*
4192282f4214SMike Kravetz 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4193282f4214SMike Kravetz 	 * yet, so this hugepages= parameter goes to the "default hstate".
4194282f4214SMike Kravetz 	 * Otherwise, it goes with the previously parsed hugepagesz or
4195282f4214SMike Kravetz 	 * default_hugepagesz.
4196a3437870SNishanth Aravamudan 	 */
41979fee021dSVaishali Thakkar 	else if (!hugetlb_max_hstate)
4198a3437870SNishanth Aravamudan 		mhp = &default_hstate_max_huge_pages;
4199a3437870SNishanth Aravamudan 	else
4200a3437870SNishanth Aravamudan 		mhp = &parsed_hstate->max_huge_pages;
4201a3437870SNishanth Aravamudan 
42028faa8b07SAndi Kleen 	if (mhp == last_mhp) {
4203282f4214SMike Kravetz 		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4204f81f6e4bSPeng Liu 		return 1;
42058faa8b07SAndi Kleen 	}
42068faa8b07SAndi Kleen 
4207b5389086SZhenguo Yao 	while (*p) {
4208b5389086SZhenguo Yao 		count = 0;
4209b5389086SZhenguo Yao 		if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4210b5389086SZhenguo Yao 			goto invalid;
4211b5389086SZhenguo Yao 		/* Parameter is node format */
4212b5389086SZhenguo Yao 		if (p[count] == ':') {
4213b5389086SZhenguo Yao 			if (!hugetlb_node_alloc_supported()) {
4214b5389086SZhenguo Yao 				pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4215f81f6e4bSPeng Liu 				return 1;
4216b5389086SZhenguo Yao 			}
42170a7a0f6fSPeng Liu 			if (tmp >= MAX_NUMNODES || !node_online(tmp))
4218e79ce983SLiu Yuntao 				goto invalid;
42190a7a0f6fSPeng Liu 			node = array_index_nospec(tmp, MAX_NUMNODES);
4220b5389086SZhenguo Yao 			p += count + 1;
4221b5389086SZhenguo Yao 			/* Parse hugepages */
4222b5389086SZhenguo Yao 			if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4223b5389086SZhenguo Yao 				goto invalid;
4224b5389086SZhenguo Yao 			if (!hugetlb_max_hstate)
4225b5389086SZhenguo Yao 				default_hugepages_in_node[node] = tmp;
4226b5389086SZhenguo Yao 			else
4227b5389086SZhenguo Yao 				parsed_hstate->max_huge_pages_node[node] = tmp;
4228b5389086SZhenguo Yao 			*mhp += tmp;
4229b5389086SZhenguo Yao 			/* Go to parse the next node */
4230b5389086SZhenguo Yao 			if (p[count] == ',')
4231b5389086SZhenguo Yao 				p += count + 1;
4232b5389086SZhenguo Yao 			else
4233b5389086SZhenguo Yao 				break;
4234b5389086SZhenguo Yao 		} else {
4235b5389086SZhenguo Yao 			if (p != s)
4236b5389086SZhenguo Yao 				goto invalid;
4237b5389086SZhenguo Yao 			*mhp = tmp;
4238b5389086SZhenguo Yao 			break;
4239b5389086SZhenguo Yao 		}
4240b5389086SZhenguo Yao 	}
4241a3437870SNishanth Aravamudan 
42428faa8b07SAndi Kleen 	/*
42438faa8b07SAndi Kleen 	 * Global state is always initialized later in hugetlb_init.
424404adbc3fSMiaohe Lin 	 * But we need to allocate gigantic hstates here early to still
42458faa8b07SAndi Kleen 	 * use the bootmem allocator.
42468faa8b07SAndi Kleen 	 */
424704adbc3fSMiaohe Lin 	if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
42488faa8b07SAndi Kleen 		hugetlb_hstate_alloc_pages(parsed_hstate);
42498faa8b07SAndi Kleen 
42508faa8b07SAndi Kleen 	last_mhp = mhp;
42518faa8b07SAndi Kleen 
4252a3437870SNishanth Aravamudan 	return 1;
4253b5389086SZhenguo Yao 
4254b5389086SZhenguo Yao invalid:
4255b5389086SZhenguo Yao 	pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4256f87442f4SPeng Liu 	hugepages_clear_pages_in_node();
4257f81f6e4bSPeng Liu 	return 1;
4258a3437870SNishanth Aravamudan }
4259282f4214SMike Kravetz __setup("hugepages=", hugepages_setup);
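
/*
 * For example (assuming the architecture accepts the sizes involved), the
 * setup handlers in this file parse boot parameters such as:
 *
 *	hugepages=512				512 default-sized huge pages
 *	hugepagesz=1G hugepages=4		4 huge pages of 1 GB
 *	hugepagesz=2M hugepages=0:128,1:128	128 2 MB pages on nodes 0 and 1
 */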
4260e11bfbfcSNick Piggin 
4261282f4214SMike Kravetz /*
4262282f4214SMike Kravetz  * hugepagesz command line processing
4263282f4214SMike Kravetz  * A specific huge page size can only be specified once with hugepagesz.
4264282f4214SMike Kravetz  * hugepagesz is followed by hugepages on the command line.  The global
4265282f4214SMike Kravetz  * variable 'parsed_valid_hugepagesz' is used to determine if the prior
4266282f4214SMike Kravetz  * hugepagesz argument was valid.
4267282f4214SMike Kravetz  */
4268359f2544SMike Kravetz static int __init hugepagesz_setup(char *s)
4269e11bfbfcSNick Piggin {
4270359f2544SMike Kravetz 	unsigned long size;
4271282f4214SMike Kravetz 	struct hstate *h;
4272282f4214SMike Kravetz 
4273282f4214SMike Kravetz 	parsed_valid_hugepagesz = false;
4274359f2544SMike Kravetz 	size = (unsigned long)memparse(s, NULL);
4275359f2544SMike Kravetz 
4276359f2544SMike Kravetz 	if (!arch_hugetlb_valid_size(size)) {
4277282f4214SMike Kravetz 		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4278f81f6e4bSPeng Liu 		return 1;
4279359f2544SMike Kravetz 	}
4280359f2544SMike Kravetz 
4281282f4214SMike Kravetz 	h = size_to_hstate(size);
4282282f4214SMike Kravetz 	if (h) {
4283282f4214SMike Kravetz 		/*
4284282f4214SMike Kravetz 		 * hstate for this size already exists.  This is normally
4285282f4214SMike Kravetz 		 * an error, but is allowed if the existing hstate is the
4286282f4214SMike Kravetz 		 * default hstate.  More specifically, it is only allowed if
4287282f4214SMike Kravetz 		 * the number of huge pages for the default hstate was not
4288282f4214SMike Kravetz 		 * previously specified.
4289282f4214SMike Kravetz 		 */
4290282f4214SMike Kravetz 		if (!parsed_default_hugepagesz || h != &default_hstate ||
4291282f4214SMike Kravetz 		    default_hstate.max_huge_pages) {
4292282f4214SMike Kravetz 			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4293f81f6e4bSPeng Liu 			return 1;
429438237830SMike Kravetz 		}
429538237830SMike Kravetz 
4296282f4214SMike Kravetz 		/*
4297282f4214SMike Kravetz 		 * No need to call hugetlb_add_hstate() as hstate already
4298282f4214SMike Kravetz 		 * exists.  But, do set parsed_hstate so that a following
4299282f4214SMike Kravetz 		 * hugepages= parameter will be applied to this hstate.
4300282f4214SMike Kravetz 		 */
4301282f4214SMike Kravetz 		parsed_hstate = h;
4302282f4214SMike Kravetz 		parsed_valid_hugepagesz = true;
4303e11bfbfcSNick Piggin 		return 1;
4304e11bfbfcSNick Piggin 	}
4305282f4214SMike Kravetz 
4306359f2544SMike Kravetz 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4307282f4214SMike Kravetz 	parsed_valid_hugepagesz = true;
4308359f2544SMike Kravetz 	return 1;
4309359f2544SMike Kravetz }
4310359f2544SMike Kravetz __setup("hugepagesz=", hugepagesz_setup);
4311359f2544SMike Kravetz 
4312282f4214SMike Kravetz /*
4313282f4214SMike Kravetz  * default_hugepagesz command line input
4314282f4214SMike Kravetz  * Only one instance of default_hugepagesz allowed on command line.
4315282f4214SMike Kravetz  */
4316ae94da89SMike Kravetz static int __init default_hugepagesz_setup(char *s)
4317e11bfbfcSNick Piggin {
4318ae94da89SMike Kravetz 	unsigned long size;
4319b5389086SZhenguo Yao 	int i;
4320ae94da89SMike Kravetz 
4321282f4214SMike Kravetz 	parsed_valid_hugepagesz = false;
4322282f4214SMike Kravetz 	if (parsed_default_hugepagesz) {
4323282f4214SMike Kravetz 		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4324f81f6e4bSPeng Liu 		return 1;
4325282f4214SMike Kravetz 	}
4326282f4214SMike Kravetz 
4327282f4214SMike Kravetz 	size = (unsigned long)memparse(s, NULL);
4328282f4214SMike Kravetz 
4329282f4214SMike Kravetz 	if (!arch_hugetlb_valid_size(size)) {
4330282f4214SMike Kravetz 		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4331f81f6e4bSPeng Liu 		return 1;
4332282f4214SMike Kravetz 	}
4333282f4214SMike Kravetz 
4334282f4214SMike Kravetz 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4335282f4214SMike Kravetz 	parsed_valid_hugepagesz = true;
4336282f4214SMike Kravetz 	parsed_default_hugepagesz = true;
4337282f4214SMike Kravetz 	default_hstate_idx = hstate_index(size_to_hstate(size));
4338282f4214SMike Kravetz 
4339282f4214SMike Kravetz 	/*
4340282f4214SMike Kravetz 	 * The number of default huge pages (for this size) could have been
4341282f4214SMike Kravetz 	 * specified as the first hugetlb parameter: hugepages=X.  If so,
4342282f4214SMike Kravetz 	 * then default_hstate_max_huge_pages is set.  If the default huge
4343282f4214SMike Kravetz 	 * page size is gigantic (>= MAX_ORDER), then the pages must be
4344282f4214SMike Kravetz 	 * allocated here from bootmem allocator.
4345282f4214SMike Kravetz 	 * allocated here from the bootmem allocator.
4346282f4214SMike Kravetz 	if (default_hstate_max_huge_pages) {
4347282f4214SMike Kravetz 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
43480a7a0f6fSPeng Liu 		for_each_online_node(i)
4349b5389086SZhenguo Yao 			default_hstate.max_huge_pages_node[i] =
4350b5389086SZhenguo Yao 				default_hugepages_in_node[i];
4351282f4214SMike Kravetz 		if (hstate_is_gigantic(&default_hstate))
4352282f4214SMike Kravetz 			hugetlb_hstate_alloc_pages(&default_hstate);
4353282f4214SMike Kravetz 		default_hstate_max_huge_pages = 0;
4354282f4214SMike Kravetz 	}
4355282f4214SMike Kravetz 
4356e11bfbfcSNick Piggin 	return 1;
4357e11bfbfcSNick Piggin }
4358ae94da89SMike Kravetz __setup("default_hugepagesz=", default_hugepagesz_setup);
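
/*
 * For example, "hugepages=16 default_hugepagesz=1G" is accepted even though
 * hugepages= appears first: the count is parked in
 * default_hstate_max_huge_pages and applied here once the default size is
 * known, with gigantic pages allocated from bootmem at this point.
 */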
4359a3437870SNishanth Aravamudan 
4360d2226ebdSFeng Tang static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
4361d2226ebdSFeng Tang {
4362d2226ebdSFeng Tang #ifdef CONFIG_NUMA
4363d2226ebdSFeng Tang 	struct mempolicy *mpol = get_task_policy(current);
4364d2226ebdSFeng Tang 
4365d2226ebdSFeng Tang 	/*
4366d2226ebdSFeng Tang 	 * Only enforce MPOL_BIND policy which overlaps with cpuset policy
4367d2226ebdSFeng Tang 	 * (from policy_nodemask) specifically for hugetlb case
4368d2226ebdSFeng Tang 	 * (from policy_nodemask) specifically for the hugetlb case
4369d2226ebdSFeng Tang 	if (mpol->mode == MPOL_BIND &&
4370d2226ebdSFeng Tang 		(apply_policy_zone(mpol, gfp_zone(gfp)) &&
4371d2226ebdSFeng Tang 		 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
4372d2226ebdSFeng Tang 		return &mpol->nodes;
4373d2226ebdSFeng Tang #endif
4374d2226ebdSFeng Tang 	return NULL;
4375d2226ebdSFeng Tang }
4376d2226ebdSFeng Tang 
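/*
 * Count the free huge pages of @h on the nodes the current task may use:
 * the intersection of cpuset_current_mems_allowed with any MPOL_BIND
 * nodemask returned by policy_mbind_nodemask() above.
 */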
43778ca39e68SMuchun Song static unsigned int allowed_mems_nr(struct hstate *h)
43788a213460SNishanth Aravamudan {
43798a213460SNishanth Aravamudan 	int node;
43808a213460SNishanth Aravamudan 	unsigned int nr = 0;
4381d2226ebdSFeng Tang 	nodemask_t *mbind_nodemask;
43828ca39e68SMuchun Song 	unsigned int *array = h->free_huge_pages_node;
43838ca39e68SMuchun Song 	gfp_t gfp_mask = htlb_alloc_mask(h);
43848a213460SNishanth Aravamudan 
4385d2226ebdSFeng Tang 	mbind_nodemask = policy_mbind_nodemask(gfp_mask);
43868ca39e68SMuchun Song 	for_each_node_mask(node, cpuset_current_mems_allowed) {
4387d2226ebdSFeng Tang 		if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
43888a213460SNishanth Aravamudan 			nr += array[node];
43898ca39e68SMuchun Song 	}
43908a213460SNishanth Aravamudan 
43918a213460SNishanth Aravamudan 	return nr;
43928a213460SNishanth Aravamudan }
43938a213460SNishanth Aravamudan 
43948a213460SNishanth Aravamudan #ifdef CONFIG_SYSCTL
439517743798SMuchun Song static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
439617743798SMuchun Song 					  void *buffer, size_t *length,
439717743798SMuchun Song 					  loff_t *ppos, unsigned long *out)
439817743798SMuchun Song {
439917743798SMuchun Song 	struct ctl_table dup_table;
440017743798SMuchun Song 
440117743798SMuchun Song 	/*
440217743798SMuchun Song 	 * In order to avoid races with __do_proc_doulongvec_minmax(), we
440317743798SMuchun Song 	 * duplicate @table and alter the copy rather than the original.
440417743798SMuchun Song 	 */
440517743798SMuchun Song 	dup_table = *table;
440617743798SMuchun Song 	dup_table.data = out;
440717743798SMuchun Song 
440817743798SMuchun Song 	return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
440917743798SMuchun Song }
441017743798SMuchun Song 
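/*
 * Common code behind the nr_hugepages sysctl handlers below: report the
 * default hstate's max_huge_pages and, on write, hand the new value to
 * __nr_hugepages_store_common(), optionally obeying the task's mempolicy.
 */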
441106808b08SLee Schermerhorn static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
441206808b08SLee Schermerhorn 			 struct ctl_table *table, int write,
441332927393SChristoph Hellwig 			 void *buffer, size_t *length, loff_t *ppos)
44141da177e4SLinus Torvalds {
4415e5ff2159SAndi Kleen 	struct hstate *h = &default_hstate;
4416238d3c13SDavid Rientjes 	unsigned long tmp = h->max_huge_pages;
441708d4a246SMichal Hocko 	int ret;
4418e5ff2159SAndi Kleen 
4419457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
442086613628SJan Stancek 		return -EOPNOTSUPP;
4421457c1b27SNishanth Aravamudan 
442217743798SMuchun Song 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
442317743798SMuchun Song 					     &tmp);
442408d4a246SMichal Hocko 	if (ret)
442508d4a246SMichal Hocko 		goto out;
4426e5ff2159SAndi Kleen 
4427238d3c13SDavid Rientjes 	if (write)
4428238d3c13SDavid Rientjes 		ret = __nr_hugepages_store_common(obey_mempolicy, h,
4429238d3c13SDavid Rientjes 						  NUMA_NO_NODE, tmp, *length);
443008d4a246SMichal Hocko out:
443108d4a246SMichal Hocko 	return ret;
44321da177e4SLinus Torvalds }
4433396faf03SMel Gorman 
443406808b08SLee Schermerhorn int hugetlb_sysctl_handler(struct ctl_table *table, int write,
443532927393SChristoph Hellwig 			  void *buffer, size_t *length, loff_t *ppos)
443606808b08SLee Schermerhorn {
443706808b08SLee Schermerhorn 
443806808b08SLee Schermerhorn 	return hugetlb_sysctl_handler_common(false, table, write,
443906808b08SLee Schermerhorn 							buffer, length, ppos);
444006808b08SLee Schermerhorn }
444106808b08SLee Schermerhorn 
444206808b08SLee Schermerhorn #ifdef CONFIG_NUMA
444306808b08SLee Schermerhorn int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
444432927393SChristoph Hellwig 			  void *buffer, size_t *length, loff_t *ppos)
444506808b08SLee Schermerhorn {
444606808b08SLee Schermerhorn 	return hugetlb_sysctl_handler_common(true, table, write,
444706808b08SLee Schermerhorn 							buffer, length, ppos);
444806808b08SLee Schermerhorn }
444906808b08SLee Schermerhorn #endif /* CONFIG_NUMA */
445006808b08SLee Schermerhorn 
4451a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write,
445232927393SChristoph Hellwig 		void *buffer, size_t *length, loff_t *ppos)
4453a3d0c6aaSNishanth Aravamudan {
4454a5516438SAndi Kleen 	struct hstate *h = &default_hstate;
4455e5ff2159SAndi Kleen 	unsigned long tmp;
445608d4a246SMichal Hocko 	int ret;
4457e5ff2159SAndi Kleen 
4458457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
445986613628SJan Stancek 		return -EOPNOTSUPP;
4460457c1b27SNishanth Aravamudan 
4461e5ff2159SAndi Kleen 	tmp = h->nr_overcommit_huge_pages;
4462e5ff2159SAndi Kleen 
4463bae7f4aeSLuiz Capitulino 	if (write && hstate_is_gigantic(h))
4464adbe8726SEric B Munson 		return -EINVAL;
4465adbe8726SEric B Munson 
446617743798SMuchun Song 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
446717743798SMuchun Song 					     &tmp);
446808d4a246SMichal Hocko 	if (ret)
446908d4a246SMichal Hocko 		goto out;
4470e5ff2159SAndi Kleen 
4471e5ff2159SAndi Kleen 	if (write) {
4472db71ef79SMike Kravetz 		spin_lock_irq(&hugetlb_lock);
4473e5ff2159SAndi Kleen 		h->nr_overcommit_huge_pages = tmp;
4474db71ef79SMike Kravetz 		spin_unlock_irq(&hugetlb_lock);
4475e5ff2159SAndi Kleen 	}
447608d4a246SMichal Hocko out:
447708d4a246SMichal Hocko 	return ret;
4478a3d0c6aaSNishanth Aravamudan }
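
/*
 * The handler above backs /proc/sys/vm/nr_overcommit_hugepages, e.g.
 *
 *	echo 64 > /proc/sys/vm/nr_overcommit_hugepages
 *
 * Writes return -EINVAL when the default huge page size is gigantic.
 */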
4479a3d0c6aaSNishanth Aravamudan 
44801da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */
44811da177e4SLinus Torvalds 
4482e1759c21SAlexey Dobriyan void hugetlb_report_meminfo(struct seq_file *m)
44831da177e4SLinus Torvalds {
4484fcb2b0c5SRoman Gushchin 	struct hstate *h;
4485fcb2b0c5SRoman Gushchin 	unsigned long total = 0;
4486fcb2b0c5SRoman Gushchin 
4487457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
4488457c1b27SNishanth Aravamudan 		return;
4489fcb2b0c5SRoman Gushchin 
4490fcb2b0c5SRoman Gushchin 	for_each_hstate(h) {
4491fcb2b0c5SRoman Gushchin 		unsigned long count = h->nr_huge_pages;
4492fcb2b0c5SRoman Gushchin 
4493aca78307SMiaohe Lin 		total += huge_page_size(h) * count;
4494fcb2b0c5SRoman Gushchin 
4495fcb2b0c5SRoman Gushchin 		if (h == &default_hstate)
4496e1759c21SAlexey Dobriyan 			seq_printf(m,
44971da177e4SLinus Torvalds 				   "HugePages_Total:   %5lu\n"
44981da177e4SLinus Torvalds 				   "HugePages_Free:    %5lu\n"
4499b45b5bd6SDavid Gibson 				   "HugePages_Rsvd:    %5lu\n"
45007893d1d5SAdam Litke 				   "HugePages_Surp:    %5lu\n"
45014f98a2feSRik van Riel 				   "Hugepagesize:   %8lu kB\n",
4502fcb2b0c5SRoman Gushchin 				   count,
4503a5516438SAndi Kleen 				   h->free_huge_pages,
4504a5516438SAndi Kleen 				   h->resv_huge_pages,
4505a5516438SAndi Kleen 				   h->surplus_huge_pages,
4506aca78307SMiaohe Lin 				   huge_page_size(h) / SZ_1K);
4507fcb2b0c5SRoman Gushchin 	}
4508fcb2b0c5SRoman Gushchin 
4509aca78307SMiaohe Lin 	seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
45101da177e4SLinus Torvalds }
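
/*
 * Example of the /proc/meminfo block produced above (values illustrative):
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       64
 *	HugePages_Rsvd:        0
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 *	Hugetlb:          131072 kB
 */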
45111da177e4SLinus Torvalds 
45127981593bSJoe Perches int hugetlb_report_node_meminfo(char *buf, int len, int nid)
45131da177e4SLinus Torvalds {
4514a5516438SAndi Kleen 	struct hstate *h = &default_hstate;
45157981593bSJoe Perches 
4516457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
4517457c1b27SNishanth Aravamudan 		return 0;
45187981593bSJoe Perches 
45197981593bSJoe Perches 	return sysfs_emit_at(buf, len,
45201da177e4SLinus Torvalds 			     "Node %d HugePages_Total: %5u\n"
4521a1de0919SNishanth Aravamudan 			     "Node %d HugePages_Free:  %5u\n"
4522a1de0919SNishanth Aravamudan 			     "Node %d HugePages_Surp:  %5u\n",
4523a5516438SAndi Kleen 			     nid, h->nr_huge_pages_node[nid],
4524a5516438SAndi Kleen 			     nid, h->free_huge_pages_node[nid],
4525a5516438SAndi Kleen 			     nid, h->surplus_huge_pages_node[nid]);
45261da177e4SLinus Torvalds }
45271da177e4SLinus Torvalds 
4528dcadcf1cSGang Li void hugetlb_show_meminfo_node(int nid)
4529949f7ec5SDavid Rientjes {
4530949f7ec5SDavid Rientjes 	struct hstate *h;
4531949f7ec5SDavid Rientjes 
4532457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
4533457c1b27SNishanth Aravamudan 		return;
4534457c1b27SNishanth Aravamudan 
4535949f7ec5SDavid Rientjes 	for_each_hstate(h)
4536dcadcf1cSGang Li 		printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4537949f7ec5SDavid Rientjes 			nid,
4538949f7ec5SDavid Rientjes 			h->nr_huge_pages_node[nid],
4539949f7ec5SDavid Rientjes 			h->free_huge_pages_node[nid],
4540949f7ec5SDavid Rientjes 			h->surplus_huge_pages_node[nid],
4541aca78307SMiaohe Lin 			huge_page_size(h) / SZ_1K);
4542949f7ec5SDavid Rientjes }
4543949f7ec5SDavid Rientjes 
45445d317b2bSNaoya Horiguchi void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
45455d317b2bSNaoya Horiguchi {
45465d317b2bSNaoya Horiguchi 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
45475d317b2bSNaoya Horiguchi 		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
45485d317b2bSNaoya Horiguchi }
45495d317b2bSNaoya Horiguchi 
45501da177e4SLinus Torvalds /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
45511da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void)
45521da177e4SLinus Torvalds {
4553d0028588SWanpeng Li 	struct hstate *h;
4554d0028588SWanpeng Li 	unsigned long nr_total_pages = 0;
4555d0028588SWanpeng Li 
4556d0028588SWanpeng Li 	for_each_hstate(h)
4557d0028588SWanpeng Li 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4558d0028588SWanpeng Li 	return nr_total_pages;
45591da177e4SLinus Torvalds }
45601da177e4SLinus Torvalds 
4561a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta)
4562fc1b8a73SMel Gorman {
4563fc1b8a73SMel Gorman 	int ret = -ENOMEM;
4564fc1b8a73SMel Gorman 
45650aa7f354SMiaohe Lin 	if (!delta)
45660aa7f354SMiaohe Lin 		return 0;
45670aa7f354SMiaohe Lin 
4568db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
4569fc1b8a73SMel Gorman 	/*
4570fc1b8a73SMel Gorman 	 * When cpuset is configured, it breaks the strict hugetlb page
4571fc1b8a73SMel Gorman 	 * reservation as the accounting is done on a global variable. Such
4572fc1b8a73SMel Gorman 	 * reservation is completely rubbish in the presence of cpuset because
4573fc1b8a73SMel Gorman 	 * the reservation is not checked against page availability for the
4574fc1b8a73SMel Gorman 	 * current cpuset. Applications can still potentially be OOM'ed by the
4575fc1b8a73SMel Gorman 	 * kernel for lack of free hugetlb pages in the cpuset the task is in.
4576fc1b8a73SMel Gorman 	 * Attempting to enforce strict accounting with cpuset is almost
4577fc1b8a73SMel Gorman 	 * impossible (or too ugly) because cpuset is so fluid that
4578fc1b8a73SMel Gorman 	 * tasks or memory nodes can be dynamically moved between cpusets.
4579fc1b8a73SMel Gorman 	 *
4580fc1b8a73SMel Gorman 	 * The change of semantics for shared hugetlb mapping with cpuset is
4581fc1b8a73SMel Gorman 	 * undesirable. However, in order to preserve some of the semantics,
4582fc1b8a73SMel Gorman 	 * we fall back to check against current free page availability as
4583fc1b8a73SMel Gorman 	 * a best attempt and hopefully to minimize the impact of changing
4584fc1b8a73SMel Gorman 	 * semantics that cpuset has.
45858ca39e68SMuchun Song 	 *
45868ca39e68SMuchun Song 	 * Apart from cpuset, we also have memory policy mechanism that
45878ca39e68SMuchun Song 	 * also determines from which node the kernel will allocate memory
45888ca39e68SMuchun Song 	 * in a NUMA system. So similar to cpuset, we also should consider
45898ca39e68SMuchun Song 	 * the memory policy of the current task, similar to the description
45908ca39e68SMuchun Song 	 * above.
4591fc1b8a73SMel Gorman 	 */
4592fc1b8a73SMel Gorman 	if (delta > 0) {
4593a5516438SAndi Kleen 		if (gather_surplus_pages(h, delta) < 0)
4594fc1b8a73SMel Gorman 			goto out;
4595fc1b8a73SMel Gorman 
45968ca39e68SMuchun Song 		if (delta > allowed_mems_nr(h)) {
4597a5516438SAndi Kleen 			return_unused_surplus_pages(h, delta);
4598fc1b8a73SMel Gorman 			goto out;
4599fc1b8a73SMel Gorman 		}
4600fc1b8a73SMel Gorman 	}
4601fc1b8a73SMel Gorman 
4602fc1b8a73SMel Gorman 	ret = 0;
4603fc1b8a73SMel Gorman 	if (delta < 0)
4604a5516438SAndi Kleen 		return_unused_surplus_pages(h, (unsigned long) -delta);
4605fc1b8a73SMel Gorman 
4606fc1b8a73SMel Gorman out:
4607db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
4608fc1b8a73SMel Gorman 	return ret;
4609fc1b8a73SMel Gorman }
4610fc1b8a73SMel Gorman 
461184afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma)
461284afd99bSAndy Whitcroft {
4613f522c3acSJoonsoo Kim 	struct resv_map *resv = vma_resv_map(vma);
461484afd99bSAndy Whitcroft 
461584afd99bSAndy Whitcroft 	/*
461684afd99bSAndy Whitcroft 	 * This new VMA should share its siblings reservation map if present.
461784afd99bSAndy Whitcroft 	 * The VMA will only ever have a valid reservation map pointer where
461884afd99bSAndy Whitcroft 	 * it is being copied for another still existing VMA.  As that VMA
461925985edcSLucas De Marchi 	 * has a reference to the reservation map, it cannot disappear until
462084afd99bSAndy Whitcroft 	 * after this open call completes.  It is therefore safe to take a
462184afd99bSAndy Whitcroft 	 * new reference here without additional locking.
462284afd99bSAndy Whitcroft 	 */
462309a26e83SMike Kravetz 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
462409a26e83SMike Kravetz 		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4625f522c3acSJoonsoo Kim 		kref_get(&resv->refs);
462684afd99bSAndy Whitcroft 	}
462709a26e83SMike Kravetz }
462884afd99bSAndy Whitcroft 
4629a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma)
4630a1e78772SMel Gorman {
4631a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
4632f522c3acSJoonsoo Kim 	struct resv_map *resv = vma_resv_map(vma);
463390481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_vma(vma);
46344e35f483SJoonsoo Kim 	unsigned long reserve, start, end;
46351c5ecae3SMike Kravetz 	long gbl_reserve;
463684afd99bSAndy Whitcroft 
46374e35f483SJoonsoo Kim 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
46384e35f483SJoonsoo Kim 		return;
46394e35f483SJoonsoo Kim 
4640a5516438SAndi Kleen 	start = vma_hugecache_offset(h, vma, vma->vm_start);
4641a5516438SAndi Kleen 	end = vma_hugecache_offset(h, vma, vma->vm_end);
464284afd99bSAndy Whitcroft 
46434e35f483SJoonsoo Kim 	reserve = (end - start) - region_count(resv, start, end);
4644e9fe92aeSMina Almasry 	hugetlb_cgroup_uncharge_counter(resv, start, end);
46457251ff78SAdam Litke 	if (reserve) {
46461c5ecae3SMike Kravetz 		/*
46471c5ecae3SMike Kravetz 		 * Decrement reserve counts.  The global reserve count may be
46481c5ecae3SMike Kravetz 		 * adjusted if the subpool has a minimum size.
46491c5ecae3SMike Kravetz 		 */
46501c5ecae3SMike Kravetz 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
46511c5ecae3SMike Kravetz 		hugetlb_acct_memory(h, -gbl_reserve);
46527251ff78SAdam Litke 	}
4653e9fe92aeSMina Almasry 
4654e9fe92aeSMina Almasry 	kref_put(&resv->refs, resv_map_release);
4655a1e78772SMel Gorman }
4656a1e78772SMel Gorman 
465731383c68SDan Williams static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
465831383c68SDan Williams {
465931383c68SDan Williams 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
466031383c68SDan Williams 		return -EINVAL;
466131383c68SDan Williams 	return 0;
466231383c68SDan Williams }
466331383c68SDan Williams 
466405ea8860SDan Williams static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
466505ea8860SDan Williams {
4666aca78307SMiaohe Lin 	return huge_page_size(hstate_vma(vma));
466705ea8860SDan Williams }
466805ea8860SDan Williams 
46691da177e4SLinus Torvalds /*
46701da177e4SLinus Torvalds  * We cannot handle pagefaults against hugetlb pages at all.  They cause
46711da177e4SLinus Torvalds  * handle_mm_fault() to try to instantiate regular-sized pages in the
46726c26d310SMiaohe Lin  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
46731da177e4SLinus Torvalds  * this far.
46741da177e4SLinus Torvalds  */
4675b3ec9f33SSouptick Joarder static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
46761da177e4SLinus Torvalds {
46771da177e4SLinus Torvalds 	BUG();
4678d0217ac0SNick Piggin 	return 0;
46791da177e4SLinus Torvalds }
46801da177e4SLinus Torvalds 
4681eec3636aSJane Chu /*
4682eec3636aSJane Chu  * When a new function is introduced to vm_operations_struct and added
4683eec3636aSJane Chu  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4684eec3636aSJane Chu  * This is because under System V memory model, mappings created via
4685eec3636aSJane Chu  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
4686eec3636aSJane Chu  * their original vm_ops are overwritten with shm_vm_ops.
4687eec3636aSJane Chu  */
4688f0f37e2fSAlexey Dobriyan const struct vm_operations_struct hugetlb_vm_ops = {
4689d0217ac0SNick Piggin 	.fault = hugetlb_vm_op_fault,
469084afd99bSAndy Whitcroft 	.open = hugetlb_vm_op_open,
4691a1e78772SMel Gorman 	.close = hugetlb_vm_op_close,
4692dd3b614fSDmitry Safonov 	.may_split = hugetlb_vm_op_split,
469305ea8860SDan Williams 	.pagesize = hugetlb_vm_op_pagesize,
46941da177e4SLinus Torvalds };
46951da177e4SLinus Torvalds 
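/*
 * Build the huge PTE for @page under @vma's protection: writable mappings
 * get a writable, dirty PTE, read-only mappings a write-protected one; the
 * architecture may then adjust the result via arch_make_huge_pte().
 */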
46961e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
46971e8f889bSDavid Gibson 				int writable)
469863551ae0SDavid Gibson {
469963551ae0SDavid Gibson 	pte_t entry;
470079c1c594SChristophe Leroy 	unsigned int shift = huge_page_shift(hstate_vma(vma));
470163551ae0SDavid Gibson 
47021e8f889bSDavid Gibson 	if (writable) {
4703106c992aSGerald Schaefer 		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
4704106c992aSGerald Schaefer 					 vma->vm_page_prot)));
470563551ae0SDavid Gibson 	} else {
4706106c992aSGerald Schaefer 		entry = huge_pte_wrprotect(mk_huge_pte(page,
4707106c992aSGerald Schaefer 					   vma->vm_page_prot));
470863551ae0SDavid Gibson 	}
470963551ae0SDavid Gibson 	entry = pte_mkyoung(entry);
471079c1c594SChristophe Leroy 	entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
471163551ae0SDavid Gibson 
471263551ae0SDavid Gibson 	return entry;
471363551ae0SDavid Gibson }
471463551ae0SDavid Gibson 
47151e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma,
47161e8f889bSDavid Gibson 				   unsigned long address, pte_t *ptep)
47171e8f889bSDavid Gibson {
47181e8f889bSDavid Gibson 	pte_t entry;
47191e8f889bSDavid Gibson 
4720106c992aSGerald Schaefer 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
472132f84528SChris Forbes 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
47224b3073e1SRussell King 		update_mmu_cache(vma, address, ptep);
47231e8f889bSDavid Gibson }
47241e8f889bSDavid Gibson 
4725d5ed7444SAneesh Kumar K.V bool is_hugetlb_entry_migration(pte_t pte)
47264a705fefSNaoya Horiguchi {
47274a705fefSNaoya Horiguchi 	swp_entry_t swp;
47284a705fefSNaoya Horiguchi 
47294a705fefSNaoya Horiguchi 	if (huge_pte_none(pte) || pte_present(pte))
4730d5ed7444SAneesh Kumar K.V 		return false;
47314a705fefSNaoya Horiguchi 	swp = pte_to_swp_entry(pte);
4732d79d176aSBaoquan He 	if (is_migration_entry(swp))
4733d5ed7444SAneesh Kumar K.V 		return true;
47344a705fefSNaoya Horiguchi 	else
4735d5ed7444SAneesh Kumar K.V 		return false;
47364a705fefSNaoya Horiguchi }
47374a705fefSNaoya Horiguchi 
47383e5c3600SBaoquan He static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
47394a705fefSNaoya Horiguchi {
47404a705fefSNaoya Horiguchi 	swp_entry_t swp;
47414a705fefSNaoya Horiguchi 
47424a705fefSNaoya Horiguchi 	if (huge_pte_none(pte) || pte_present(pte))
47433e5c3600SBaoquan He 		return false;
47444a705fefSNaoya Horiguchi 	swp = pte_to_swp_entry(pte);
4745d79d176aSBaoquan He 	if (is_hwpoison_entry(swp))
47463e5c3600SBaoquan He 		return true;
47474a705fefSNaoya Horiguchi 	else
47483e5c3600SBaoquan He 		return false;
47494a705fefSNaoya Horiguchi }
47501e8f889bSDavid Gibson 
47514eae4efaSPeter Xu static void
47524eae4efaSPeter Xu hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
47534eae4efaSPeter Xu 		     struct page *new_page)
47544eae4efaSPeter Xu {
47554eae4efaSPeter Xu 	__SetPageUptodate(new_page);
47564eae4efaSPeter Xu 	hugepage_add_new_anon_rmap(new_page, vma, addr);
47571eba86c0SPasha Tatashin 	set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
47584eae4efaSPeter Xu 	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
47594eae4efaSPeter Xu 	ClearHPageRestoreReserve(new_page);
47604eae4efaSPeter Xu 	SetHPageMigratable(new_page);
47614eae4efaSPeter Xu }
47624eae4efaSPeter Xu 
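/*
 * Copy the huge page table entries of @src_vma into @dst_vma when the page
 * tables are duplicated (e.g. at fork).  For private COW mappings both
 * copies are write-protected; anon pages whose rmap cannot be duplicated
 * (e.g. pinned pages) are instead copied into newly allocated huge pages.
 */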
476363551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
4764bc70fbf2SPeter Xu 			    struct vm_area_struct *dst_vma,
4765bc70fbf2SPeter Xu 			    struct vm_area_struct *src_vma)
476663551ae0SDavid Gibson {
47673aa4ed80SMiaohe Lin 	pte_t *src_pte, *dst_pte, entry;
476863551ae0SDavid Gibson 	struct page *ptepage;
47691c59827dSHugh Dickins 	unsigned long addr;
4770bc70fbf2SPeter Xu 	bool cow = is_cow_mapping(src_vma->vm_flags);
4771bc70fbf2SPeter Xu 	struct hstate *h = hstate_vma(src_vma);
4772a5516438SAndi Kleen 	unsigned long sz = huge_page_size(h);
47734eae4efaSPeter Xu 	unsigned long npages = pages_per_huge_page(h);
4774bc70fbf2SPeter Xu 	struct address_space *mapping = src_vma->vm_file->f_mapping;
4775ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
4776e95a9851SMike Kravetz 	unsigned long last_addr_mask;
4777e8569dd2SAndreas Sandberg 	int ret = 0;
47781e8f889bSDavid Gibson 
4779ac46d4f3SJérôme Glisse 	if (cow) {
4780bc70fbf2SPeter Xu 		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_vma, src,
4781bc70fbf2SPeter Xu 					src_vma->vm_start,
4782bc70fbf2SPeter Xu 					src_vma->vm_end);
4783ac46d4f3SJérôme Glisse 		mmu_notifier_invalidate_range_start(&range);
4784623a1ddfSDavid Hildenbrand 		mmap_assert_write_locked(src);
4785623a1ddfSDavid Hildenbrand 		raw_write_seqcount_begin(&src->write_protect_seq);
4786c0d0381aSMike Kravetz 	} else {
4787c0d0381aSMike Kravetz 		/*
4788c0d0381aSMike Kravetz 		 * For shared mappings i_mmap_rwsem must be held to call
4789c0d0381aSMike Kravetz 		 * huge_pte_alloc, otherwise the returned ptep could go
4790c0d0381aSMike Kravetz 		 * away if part of a shared pmd and another thread calls
4791c0d0381aSMike Kravetz 		 * huge_pmd_unshare.
4792c0d0381aSMike Kravetz 		 */
4793c0d0381aSMike Kravetz 		i_mmap_lock_read(mapping);
4794ac46d4f3SJérôme Glisse 	}
4795e8569dd2SAndreas Sandberg 
4796e95a9851SMike Kravetz 	last_addr_mask = hugetlb_mask_last_page(h);
4797bc70fbf2SPeter Xu 	for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
4798cb900f41SKirill A. Shutemov 		spinlock_t *src_ptl, *dst_ptl;
47997868a208SPunit Agrawal 		src_pte = huge_pte_offset(src, addr, sz);
4800e95a9851SMike Kravetz 		if (!src_pte) {
4801e95a9851SMike Kravetz 			addr |= last_addr_mask;
4802c74df32cSHugh Dickins 			continue;
4803e95a9851SMike Kravetz 		}
4804bc70fbf2SPeter Xu 		dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
4805e8569dd2SAndreas Sandberg 		if (!dst_pte) {
4806e8569dd2SAndreas Sandberg 			ret = -ENOMEM;
4807e8569dd2SAndreas Sandberg 			break;
4808e8569dd2SAndreas Sandberg 		}
4809c5c99429SLarry Woodman 
48105e41540cSMike Kravetz 		/*
48115e41540cSMike Kravetz 		 * If the pagetables are shared don't copy or take references.
48125e41540cSMike Kravetz 		 *
48133aa4ed80SMiaohe Lin 		 * dst_pte == src_pte is the common case of src/dest sharing.
48145e41540cSMike Kravetz 		 * However, src could have 'unshared' and dst shares with
48153aa4ed80SMiaohe Lin 		 * another vma. So page_count of ptep page is checked instead
48163aa4ed80SMiaohe Lin 		 * to reliably determine whether pte is shared.
48175e41540cSMike Kravetz 		 */
48183aa4ed80SMiaohe Lin 		if (page_count(virt_to_page(dst_pte)) > 1) {
4819e95a9851SMike Kravetz 			addr |= last_addr_mask;
4820c5c99429SLarry Woodman 			continue;
4821e95a9851SMike Kravetz 		}
4822c5c99429SLarry Woodman 
4823cb900f41SKirill A. Shutemov 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
4824cb900f41SKirill A. Shutemov 		src_ptl = huge_pte_lockptr(h, src, src_pte);
4825cb900f41SKirill A. Shutemov 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
48264a705fefSNaoya Horiguchi 		entry = huge_ptep_get(src_pte);
48274eae4efaSPeter Xu again:
48283aa4ed80SMiaohe Lin 		if (huge_pte_none(entry)) {
48295e41540cSMike Kravetz 			/*
48303aa4ed80SMiaohe Lin 			 * Skip if the src entry is none.
48315e41540cSMike Kravetz 			 */
48324a705fefSNaoya Horiguchi 			;
4833c2cb0dccSNaoya Horiguchi 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
4834c2cb0dccSNaoya Horiguchi 			bool uffd_wp = huge_pte_uffd_wp(entry);
4835c2cb0dccSNaoya Horiguchi 
4836c2cb0dccSNaoya Horiguchi 			if (!userfaultfd_wp(dst_vma) && uffd_wp)
4837c2cb0dccSNaoya Horiguchi 				entry = huge_pte_clear_uffd_wp(entry);
4838c2cb0dccSNaoya Horiguchi 			set_huge_pte_at(dst, addr, dst_pte, entry);
4839c2cb0dccSNaoya Horiguchi 		} else if (unlikely(is_hugetlb_entry_migration(entry))) {
48404a705fefSNaoya Horiguchi 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
4841bc70fbf2SPeter Xu 			bool uffd_wp = huge_pte_uffd_wp(entry);
48424a705fefSNaoya Horiguchi 
48436c287605SDavid Hildenbrand 			if (!is_readable_migration_entry(swp_entry) && cow) {
48444a705fefSNaoya Horiguchi 				/*
48454a705fefSNaoya Horiguchi 				 * COW mappings require pages in both
48464a705fefSNaoya Horiguchi 				 * parent and child to be set to read.
48474a705fefSNaoya Horiguchi 				 */
48484dd845b5SAlistair Popple 				swp_entry = make_readable_migration_entry(
48494dd845b5SAlistair Popple 							swp_offset(swp_entry));
48504a705fefSNaoya Horiguchi 				entry = swp_entry_to_pte(swp_entry);
4851bc70fbf2SPeter Xu 				if (userfaultfd_wp(src_vma) && uffd_wp)
4852bc70fbf2SPeter Xu 					entry = huge_pte_mkuffd_wp(entry);
485318f39629SQi Zheng 				set_huge_pte_at(src, addr, src_pte, entry);
48544a705fefSNaoya Horiguchi 			}
4855bc70fbf2SPeter Xu 			if (!userfaultfd_wp(dst_vma) && uffd_wp)
4856bc70fbf2SPeter Xu 				entry = huge_pte_clear_uffd_wp(entry);
485718f39629SQi Zheng 			set_huge_pte_at(dst, addr, dst_pte, entry);
4858bc70fbf2SPeter Xu 		} else if (unlikely(is_pte_marker(entry))) {
4859bc70fbf2SPeter Xu 			/*
4860bc70fbf2SPeter Xu 			 * We copy the pte marker only if the dst vma has
4861bc70fbf2SPeter Xu 			 * uffd-wp enabled.
4862bc70fbf2SPeter Xu 			 */
4863bc70fbf2SPeter Xu 			if (userfaultfd_wp(dst_vma))
4864bc70fbf2SPeter Xu 				set_huge_pte_at(dst, addr, dst_pte, entry);
48654a705fefSNaoya Horiguchi 		} else {
48664eae4efaSPeter Xu 			entry = huge_ptep_get(src_pte);
48674eae4efaSPeter Xu 			ptepage = pte_page(entry);
48684eae4efaSPeter Xu 			get_page(ptepage);
48694eae4efaSPeter Xu 
48704eae4efaSPeter Xu 			/*
4871fb3d824dSDavid Hildenbrand 			 * Failing to duplicate the anon rmap is a rare case
4872fb3d824dSDavid Hildenbrand 			 * where we see pinned hugetlb pages while they're
4873fb3d824dSDavid Hildenbrand 			 * prone to COW. We need to do the COW earlier during
4874fb3d824dSDavid Hildenbrand 			 * fork.
48754eae4efaSPeter Xu 			 *
48764eae4efaSPeter Xu 			 * When pre-allocating the page or copying data, we
48774eae4efaSPeter Xu 			 * need to be without the pgtable locks since we could
48784eae4efaSPeter Xu 			 * sleep during the process.
48794eae4efaSPeter Xu 			 */
4880fb3d824dSDavid Hildenbrand 			if (!PageAnon(ptepage)) {
4881fb3d824dSDavid Hildenbrand 				page_dup_file_rmap(ptepage, true);
4882bc70fbf2SPeter Xu 			} else if (page_try_dup_anon_rmap(ptepage, true,
4883bc70fbf2SPeter Xu 							  src_vma)) {
48844eae4efaSPeter Xu 				pte_t src_pte_old = entry;
48854eae4efaSPeter Xu 				struct page *new;
48864eae4efaSPeter Xu 
48874eae4efaSPeter Xu 				spin_unlock(src_ptl);
48884eae4efaSPeter Xu 				spin_unlock(dst_ptl);
48894eae4efaSPeter Xu 				/* Do not use reserve as it's private owned */
4890bc70fbf2SPeter Xu 				/* Do not use the reserve as it's privately owned */
48914eae4efaSPeter Xu 				if (IS_ERR(new)) {
48924eae4efaSPeter Xu 					put_page(ptepage);
48934eae4efaSPeter Xu 					ret = PTR_ERR(new);
48944eae4efaSPeter Xu 					break;
48954eae4efaSPeter Xu 				}
4896bc70fbf2SPeter Xu 				copy_user_huge_page(new, ptepage, addr, dst_vma,
48974eae4efaSPeter Xu 						    npages);
48984eae4efaSPeter Xu 				put_page(ptepage);
48994eae4efaSPeter Xu 
49004eae4efaSPeter Xu 				/* Install the new huge page if src pte stable */
49014eae4efaSPeter Xu 				dst_ptl = huge_pte_lock(h, dst, dst_pte);
49024eae4efaSPeter Xu 				src_ptl = huge_pte_lockptr(h, src, src_pte);
49034eae4efaSPeter Xu 				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
49044eae4efaSPeter Xu 				entry = huge_ptep_get(src_pte);
49054eae4efaSPeter Xu 				if (!pte_same(src_pte_old, entry)) {
4906bc70fbf2SPeter Xu 					restore_reserve_on_error(h, dst_vma, addr,
4907846be085SMike Kravetz 								new);
49084eae4efaSPeter Xu 					put_page(new);
49093aa4ed80SMiaohe Lin 					/* huge_ptep of dst_pte won't change as in child */
49104eae4efaSPeter Xu 					goto again;
49114eae4efaSPeter Xu 				}
4912bc70fbf2SPeter Xu 				hugetlb_install_page(dst_vma, dst_pte, addr, new);
49134eae4efaSPeter Xu 				spin_unlock(src_ptl);
49144eae4efaSPeter Xu 				spin_unlock(dst_ptl);
49154eae4efaSPeter Xu 				continue;
49164eae4efaSPeter Xu 			}
49174eae4efaSPeter Xu 
491834ee645eSJoerg Roedel 			if (cow) {
49190f10851eSJérôme Glisse 				/*
49200f10851eSJérôme Glisse 				 * No need to notify as we are downgrading page
49210f10851eSJérôme Glisse 				 * table protection not changing it to point
49220f10851eSJérôme Glisse 				 * to a new page.
49230f10851eSJérôme Glisse 				 *
4924ee65728eSMike Rapoport 				 * See Documentation/mm/mmu_notifier.rst
49250f10851eSJérôme Glisse 				 */
49267f2e9525SGerald Schaefer 				huge_ptep_set_wrprotect(src, addr, src_pte);
492784894e1cSPeter Xu 				entry = huge_pte_wrprotect(entry);
492834ee645eSJoerg Roedel 			}
49294eae4efaSPeter Xu 
493063551ae0SDavid Gibson 			set_huge_pte_at(dst, addr, dst_pte, entry);
49314eae4efaSPeter Xu 			hugetlb_count_add(npages, dst);
49321c59827dSHugh Dickins 		}
4933cb900f41SKirill A. Shutemov 		spin_unlock(src_ptl);
4934cb900f41SKirill A. Shutemov 		spin_unlock(dst_ptl);
493563551ae0SDavid Gibson 	}
493663551ae0SDavid Gibson 
4937623a1ddfSDavid Hildenbrand 	if (cow) {
4938623a1ddfSDavid Hildenbrand 		raw_write_seqcount_end(&src->write_protect_seq);
4939ac46d4f3SJérôme Glisse 		mmu_notifier_invalidate_range_end(&range);
4940623a1ddfSDavid Hildenbrand 	} else {
4941c0d0381aSMike Kravetz 		i_mmap_unlock_read(mapping);
4942623a1ddfSDavid Hildenbrand 	}
4943e8569dd2SAndreas Sandberg 
4944e8569dd2SAndreas Sandberg 	return ret;
494563551ae0SDavid Gibson }
494663551ae0SDavid Gibson 
4947550a7d60SMina Almasry static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
4948db110a99SAneesh Kumar K.V 			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
4949550a7d60SMina Almasry {
4950550a7d60SMina Almasry 	struct hstate *h = hstate_vma(vma);
4951550a7d60SMina Almasry 	struct mm_struct *mm = vma->vm_mm;
4952550a7d60SMina Almasry 	spinlock_t *src_ptl, *dst_ptl;
4953db110a99SAneesh Kumar K.V 	pte_t pte;
4954550a7d60SMina Almasry 
4955550a7d60SMina Almasry 	dst_ptl = huge_pte_lock(h, mm, dst_pte);
4956550a7d60SMina Almasry 	src_ptl = huge_pte_lockptr(h, mm, src_pte);
4957550a7d60SMina Almasry 
4958550a7d60SMina Almasry 	/*
4959550a7d60SMina Almasry 	 * We don't have to worry about the ordering of src and dst ptlocks
4960550a7d60SMina Almasry 	 * because exclusive mmap_sem (or the i_mmap_lock) prevents deadlock.
4961550a7d60SMina Almasry 	 */
4962550a7d60SMina Almasry 	if (src_ptl != dst_ptl)
4963550a7d60SMina Almasry 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4964550a7d60SMina Almasry 
4965550a7d60SMina Almasry 	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
4966550a7d60SMina Almasry 	set_huge_pte_at(mm, new_addr, dst_pte, pte);
4967550a7d60SMina Almasry 
4968550a7d60SMina Almasry 	if (src_ptl != dst_ptl)
4969550a7d60SMina Almasry 		spin_unlock(src_ptl);
4970550a7d60SMina Almasry 	spin_unlock(dst_ptl);
4971550a7d60SMina Almasry }
4972550a7d60SMina Almasry 
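/*
 * Relocate the huge PTEs of @vma from [old_addr, old_addr + len) to the
 * range starting at new_addr (mremap support).  Shared PMDs are unshared
 * rather than moved, and the old range is TLB-flushed before returning.
 */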
4973550a7d60SMina Almasry int move_hugetlb_page_tables(struct vm_area_struct *vma,
4974550a7d60SMina Almasry 			     struct vm_area_struct *new_vma,
4975550a7d60SMina Almasry 			     unsigned long old_addr, unsigned long new_addr,
4976550a7d60SMina Almasry 			     unsigned long len)
4977550a7d60SMina Almasry {
4978550a7d60SMina Almasry 	struct hstate *h = hstate_vma(vma);
4979550a7d60SMina Almasry 	struct address_space *mapping = vma->vm_file->f_mapping;
4980550a7d60SMina Almasry 	unsigned long sz = huge_page_size(h);
4981550a7d60SMina Almasry 	struct mm_struct *mm = vma->vm_mm;
4982550a7d60SMina Almasry 	unsigned long old_end = old_addr + len;
4983e95a9851SMike Kravetz 	unsigned long last_addr_mask;
4984550a7d60SMina Almasry 	pte_t *src_pte, *dst_pte;
4985550a7d60SMina Almasry 	struct mmu_notifier_range range;
49863d0b95cdSBaolin Wang 	bool shared_pmd = false;
4987550a7d60SMina Almasry 
4988550a7d60SMina Almasry 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, old_addr,
4989550a7d60SMina Almasry 				old_end);
4990550a7d60SMina Almasry 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
49913d0b95cdSBaolin Wang 	/*
49923d0b95cdSBaolin Wang 	 * In case of shared PMDs, we should cover the maximum possible
49933d0b95cdSBaolin Wang 	 * range.
49943d0b95cdSBaolin Wang 	 */
49953d0b95cdSBaolin Wang 	flush_cache_range(vma, range.start, range.end);
49963d0b95cdSBaolin Wang 
4997550a7d60SMina Almasry 	mmu_notifier_invalidate_range_start(&range);
4998e95a9851SMike Kravetz 	last_addr_mask = hugetlb_mask_last_page(h);
4999550a7d60SMina Almasry 	/* Prevent race with file truncation */
5000550a7d60SMina Almasry 	i_mmap_lock_write(mapping);
5001550a7d60SMina Almasry 	for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
5002550a7d60SMina Almasry 		src_pte = huge_pte_offset(mm, old_addr, sz);
5003e95a9851SMike Kravetz 		if (!src_pte) {
5004e95a9851SMike Kravetz 			old_addr |= last_addr_mask;
5005e95a9851SMike Kravetz 			new_addr |= last_addr_mask;
5006550a7d60SMina Almasry 			continue;
5007e95a9851SMike Kravetz 		}
5008550a7d60SMina Almasry 		if (huge_pte_none(huge_ptep_get(src_pte)))
5009550a7d60SMina Almasry 			continue;
5010550a7d60SMina Almasry 
50114ddb4d91SMike Kravetz 		if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
50123d0b95cdSBaolin Wang 			shared_pmd = true;
50134ddb4d91SMike Kravetz 			old_addr |= last_addr_mask;
50144ddb4d91SMike Kravetz 			new_addr |= last_addr_mask;
5015550a7d60SMina Almasry 			continue;
50163d0b95cdSBaolin Wang 		}
5017550a7d60SMina Almasry 
5018550a7d60SMina Almasry 		dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
5019550a7d60SMina Almasry 		if (!dst_pte)
5020550a7d60SMina Almasry 			break;
5021550a7d60SMina Almasry 
5022db110a99SAneesh Kumar K.V 		move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
5023550a7d60SMina Almasry 	}
50243d0b95cdSBaolin Wang 
50253d0b95cdSBaolin Wang 	if (shared_pmd)
50263d0b95cdSBaolin Wang 		flush_tlb_range(vma, range.start, range.end);
50273d0b95cdSBaolin Wang 	else
5028550a7d60SMina Almasry 		flush_tlb_range(vma, old_end - len, old_end);
5029550a7d60SMina Almasry 	mmu_notifier_invalidate_range_end(&range);
503013e4ad2cSNadav Amit 	i_mmap_unlock_write(mapping);
5031550a7d60SMina Almasry 
5032550a7d60SMina Almasry 	return len + old_addr - old_end;
5033550a7d60SMina Almasry }
5034550a7d60SMina Almasry 
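/*
 * Tear down the huge PTEs in [start, end): clear each entry, drop the rmap
 * and page reference via the mmu_gather, and preserve uffd-wp markers unless
 * ZAP_FLAG_DROP_MARKER is set in @zap_flags.  If @ref_page is supplied, only
 * that page is unmapped.
 */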
503573c54763SPeter Xu static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
503624669e58SAneesh Kumar K.V 				   unsigned long start, unsigned long end,
503705e90bd0SPeter Xu 				   struct page *ref_page, zap_flags_t zap_flags)
503863551ae0SDavid Gibson {
503963551ae0SDavid Gibson 	struct mm_struct *mm = vma->vm_mm;
504063551ae0SDavid Gibson 	unsigned long address;
5041c7546f8fSDavid Gibson 	pte_t *ptep;
504263551ae0SDavid Gibson 	pte_t pte;
5043cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
504463551ae0SDavid Gibson 	struct page *page;
5045a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
5046a5516438SAndi Kleen 	unsigned long sz = huge_page_size(h);
5047ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
5048e95a9851SMike Kravetz 	unsigned long last_addr_mask;
5049a4a118f2SNadav Amit 	bool force_flush = false;
5050a5516438SAndi Kleen 
505163551ae0SDavid Gibson 	WARN_ON(!is_vm_hugetlb_page(vma));
5052a5516438SAndi Kleen 	BUG_ON(start & ~huge_page_mask(h));
5053a5516438SAndi Kleen 	BUG_ON(end & ~huge_page_mask(h));
505463551ae0SDavid Gibson 
505507e32661SAneesh Kumar K.V 	/*
505607e32661SAneesh Kumar K.V 	 * This is a hugetlb vma, all the pte entries should point
505707e32661SAneesh Kumar K.V 	 * to huge page.
505807e32661SAneesh Kumar K.V 	 */
5059ed6a7935SPeter Zijlstra 	tlb_change_page_size(tlb, sz);
506024669e58SAneesh Kumar K.V 	tlb_start_vma(tlb, vma);
5061dff11abeSMike Kravetz 
5062dff11abeSMike Kravetz 	/*
5063dff11abeSMike Kravetz 	 * If sharing possible, alert mmu notifiers of worst case.
5064dff11abeSMike Kravetz 	 */
50656f4f13e8SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
50666f4f13e8SJérôme Glisse 				end);
5067ac46d4f3SJérôme Glisse 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5068ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
5069e95a9851SMike Kravetz 	last_addr_mask = hugetlb_mask_last_page(h);
5070569f48b8SHillf Danton 	address = start;
5071569f48b8SHillf Danton 	for (; address < end; address += sz) {
50727868a208SPunit Agrawal 		ptep = huge_pte_offset(mm, address, sz);
5073e95a9851SMike Kravetz 		if (!ptep) {
5074e95a9851SMike Kravetz 			address |= last_addr_mask;
5075c7546f8fSDavid Gibson 			continue;
5076e95a9851SMike Kravetz 		}
5077c7546f8fSDavid Gibson 
5078cb900f41SKirill A. Shutemov 		ptl = huge_pte_lock(h, mm, ptep);
50794ddb4d91SMike Kravetz 		if (huge_pmd_unshare(mm, vma, address, ptep)) {
508031d49da5SAneesh Kumar K.V 			spin_unlock(ptl);
5081a4a118f2SNadav Amit 			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
5082a4a118f2SNadav Amit 			force_flush = true;
50834ddb4d91SMike Kravetz 			address |= last_addr_mask;
508431d49da5SAneesh Kumar K.V 			continue;
508531d49da5SAneesh Kumar K.V 		}
508639dde65cSChen, Kenneth W 
50876629326bSHillf Danton 		pte = huge_ptep_get(ptep);
508831d49da5SAneesh Kumar K.V 		if (huge_pte_none(pte)) {
508931d49da5SAneesh Kumar K.V 			spin_unlock(ptl);
509031d49da5SAneesh Kumar K.V 			continue;
509131d49da5SAneesh Kumar K.V 		}
50926629326bSHillf Danton 
50936629326bSHillf Danton 		/*
50949fbc1f63SNaoya Horiguchi 		 * A migrating or HWPoisoned hugepage is already
50959fbc1f63SNaoya Horiguchi 		 * unmapped and its refcount is dropped, so just clear pte here.
50966629326bSHillf Danton 		 */
50979fbc1f63SNaoya Horiguchi 		if (unlikely(!pte_present(pte))) {
509805e90bd0SPeter Xu 			/*
509905e90bd0SPeter Xu 			 * If the pte was wr-protected by uffd-wp in any of the
510005e90bd0SPeter Xu 			 * swap forms, and the caller does not want to
510105e90bd0SPeter Xu 			 * drop the uffd-wp bit in this zap, then replace the
510205e90bd0SPeter Xu 			 * pte with a marker.
510305e90bd0SPeter Xu 			 */
510405e90bd0SPeter Xu 			if (pte_swp_uffd_wp_any(pte) &&
510505e90bd0SPeter Xu 			    !(zap_flags & ZAP_FLAG_DROP_MARKER))
510605e90bd0SPeter Xu 				set_huge_pte_at(mm, address, ptep,
510705e90bd0SPeter Xu 						make_pte_marker(PTE_MARKER_UFFD_WP));
510805e90bd0SPeter Xu 			else
51099386fac3SPunit Agrawal 				huge_pte_clear(mm, address, ptep, sz);
511031d49da5SAneesh Kumar K.V 			spin_unlock(ptl);
511131d49da5SAneesh Kumar K.V 			continue;
51128c4894c6SNaoya Horiguchi 		}
51136629326bSHillf Danton 
51146629326bSHillf Danton 		page = pte_page(pte);
511504f2cbe3SMel Gorman 		/*
511604f2cbe3SMel Gorman 		 * If a reference page is supplied, it is because a specific
511704f2cbe3SMel Gorman 		 * page is being unmapped, not a range. Ensure the page we
511804f2cbe3SMel Gorman 		 * are about to unmap is the actual page of interest.
511904f2cbe3SMel Gorman 		 */
512004f2cbe3SMel Gorman 		if (ref_page) {
512131d49da5SAneesh Kumar K.V 			if (page != ref_page) {
512231d49da5SAneesh Kumar K.V 				spin_unlock(ptl);
512331d49da5SAneesh Kumar K.V 				continue;
512431d49da5SAneesh Kumar K.V 			}
512504f2cbe3SMel Gorman 			/*
512604f2cbe3SMel Gorman 			 * Mark the VMA as having unmapped its page so that
512704f2cbe3SMel Gorman 			 * future faults in this VMA will fail rather than
512804f2cbe3SMel Gorman 			 * looking like data was lost
512904f2cbe3SMel Gorman 			 */
513004f2cbe3SMel Gorman 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
513104f2cbe3SMel Gorman 		}
513204f2cbe3SMel Gorman 
5133c7546f8fSDavid Gibson 		pte = huge_ptep_get_and_clear(mm, address, ptep);
5134b528e4b6SAneesh Kumar K.V 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5135106c992aSGerald Schaefer 		if (huge_pte_dirty(pte))
51366649a386SKen Chen 			set_page_dirty(page);
513705e90bd0SPeter Xu 		/* Leave a uffd-wp pte marker if needed */
513805e90bd0SPeter Xu 		if (huge_pte_uffd_wp(pte) &&
513905e90bd0SPeter Xu 		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
514005e90bd0SPeter Xu 			set_huge_pte_at(mm, address, ptep,
514105e90bd0SPeter Xu 					make_pte_marker(PTE_MARKER_UFFD_WP));
51425d317b2bSNaoya Horiguchi 		hugetlb_count_sub(pages_per_huge_page(h), mm);
5143cea86fe2SHugh Dickins 		page_remove_rmap(page, vma, true);
514431d49da5SAneesh Kumar K.V 
5145cb900f41SKirill A. Shutemov 		spin_unlock(ptl);
5146e77b0852SAneesh Kumar K.V 		tlb_remove_page_size(tlb, page, huge_page_size(h));
514724669e58SAneesh Kumar K.V 		/*
514831d49da5SAneesh Kumar K.V 		 * Bail out after unmapping the reference page, if one was supplied
514924669e58SAneesh Kumar K.V 		 */
515031d49da5SAneesh Kumar K.V 		if (ref_page)
515131d49da5SAneesh Kumar K.V 			break;
5152fe1668aeSChen, Kenneth W 	}
5153ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
515424669e58SAneesh Kumar K.V 	tlb_end_vma(tlb, vma);
5155a4a118f2SNadav Amit 
5156a4a118f2SNadav Amit 	/*
5157a4a118f2SNadav Amit 	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5158a4a118f2SNadav Amit 	 * could defer the flush until now, since by holding i_mmap_rwsem we
5159a4a118f2SNadav Amit 	 * guaranteed that the last reference would not be dropped. But we must
5160a4a118f2SNadav Amit 	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
5161a4a118f2SNadav Amit 	 * dropped and the last reference to the shared PMDs page might be
5162a4a118f2SNadav Amit 	 * dropped as well.
5163a4a118f2SNadav Amit 	 *
5164a4a118f2SNadav Amit 	 * In theory we could defer the freeing of the PMD pages as well, but
5165a4a118f2SNadav Amit 	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5166a4a118f2SNadav Amit 	 * detect sharing, so we cannot defer the release of the page either.
5167a4a118f2SNadav Amit 	 * Instead, do flush now.
5168a4a118f2SNadav Amit 	 */
5169a4a118f2SNadav Amit 	if (force_flush)
5170a4a118f2SNadav Amit 		tlb_flush_mmu_tlbonly(tlb);
51711da177e4SLinus Torvalds }
517263551ae0SDavid Gibson 
5173d833352aSMel Gorman void __unmap_hugepage_range_final(struct mmu_gather *tlb,
5174d833352aSMel Gorman 			  struct vm_area_struct *vma, unsigned long start,
517505e90bd0SPeter Xu 			  unsigned long end, struct page *ref_page,
517605e90bd0SPeter Xu 			  zap_flags_t zap_flags)
5177d833352aSMel Gorman {
517805e90bd0SPeter Xu 	__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
5179d833352aSMel Gorman 
5180d833352aSMel Gorman 	/*
5181d833352aSMel Gorman 	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
5182d833352aSMel Gorman 	 * test will fail on a vma being torn down, and not grab a page table
5183d833352aSMel Gorman 	 * on its way out.  We're lucky that the flag has such an appropriate
5184d833352aSMel Gorman 	 * name, and can in fact be safely cleared here. We could clear it
5185d833352aSMel Gorman 	 * before the __unmap_hugepage_range above, but all that's necessary
5186c8c06efaSDavidlohr Bueso 	 * is to clear it before releasing the i_mmap_rwsem. This works
5187d833352aSMel Gorman 	 * because in the context in which this is called, the VMA is about to be
5188c8c06efaSDavidlohr Bueso 	 * destroyed and the i_mmap_rwsem is held.
5189d833352aSMel Gorman 	 */
5190d833352aSMel Gorman 	vma->vm_flags &= ~VM_MAYSHARE;
5191d833352aSMel Gorman }
5192d833352aSMel Gorman 
5193502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
519405e90bd0SPeter Xu 			  unsigned long end, struct page *ref_page,
519505e90bd0SPeter Xu 			  zap_flags_t zap_flags)
5196502717f4SChen, Kenneth W {
519724669e58SAneesh Kumar K.V 	struct mmu_gather tlb;
5198dff11abeSMike Kravetz 
5199a72afd87SWill Deacon 	tlb_gather_mmu(&tlb, vma->vm_mm);
520005e90bd0SPeter Xu 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
5201ae8eba8bSWill Deacon 	tlb_finish_mmu(&tlb);
5202502717f4SChen, Kenneth W }
5203502717f4SChen, Kenneth W 
520404f2cbe3SMel Gorman /*
520504f2cbe3SMel Gorman  * This is called when the original mapper is failing to COW a MAP_PRIVATE
5206578b7725SZhiyuan Dai  * mapping that it owns the reserve page for. The intention is to unmap the page
520704f2cbe3SMel Gorman  * from other VMAs and let the children be SIGKILLed if they are faulting the
520804f2cbe3SMel Gorman  * same region.
520904f2cbe3SMel Gorman  */
52102f4612afSDavidlohr Bueso static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
52112a4b3dedSHarvey Harrison 			      struct page *page, unsigned long address)
521204f2cbe3SMel Gorman {
52137526674dSAdam Litke 	struct hstate *h = hstate_vma(vma);
521404f2cbe3SMel Gorman 	struct vm_area_struct *iter_vma;
521504f2cbe3SMel Gorman 	struct address_space *mapping;
521604f2cbe3SMel Gorman 	pgoff_t pgoff;
521704f2cbe3SMel Gorman 
521804f2cbe3SMel Gorman 	/*
521904f2cbe3SMel Gorman 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
522004f2cbe3SMel Gorman 	 * from page cache lookup which is in HPAGE_SIZE units.
522104f2cbe3SMel Gorman 	 */
52227526674dSAdam Litke 	address = address & huge_page_mask(h);
522336e4f20aSMichal Hocko 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
522436e4f20aSMichal Hocko 			vma->vm_pgoff;
522593c76a3dSAl Viro 	mapping = vma->vm_file->f_mapping;
522604f2cbe3SMel Gorman 
52274eb2b1dcSMel Gorman 	/*
52284eb2b1dcSMel Gorman 	 * Take the mapping lock for the duration of the table walk. As
52294eb2b1dcSMel Gorman 	 * this mapping should be shared between all the VMAs,
52304eb2b1dcSMel Gorman 	 * __unmap_hugepage_range() is called as the lock is already held
52314eb2b1dcSMel Gorman 	 */
523283cde9e8SDavidlohr Bueso 	i_mmap_lock_write(mapping);
52336b2dbba8SMichel Lespinasse 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
523404f2cbe3SMel Gorman 		/* Do not unmap the current VMA */
523504f2cbe3SMel Gorman 		if (iter_vma == vma)
523604f2cbe3SMel Gorman 			continue;
523704f2cbe3SMel Gorman 
523804f2cbe3SMel Gorman 		/*
52392f84a899SMel Gorman 		 * Shared VMAs have their own reserves and do not affect
52402f84a899SMel Gorman 		 * MAP_PRIVATE accounting, but it is possible that a shared
52412f84a899SMel Gorman 		 * VMA is using the same page, so check and skip such VMAs.
52422f84a899SMel Gorman 		 */
52432f84a899SMel Gorman 		if (iter_vma->vm_flags & VM_MAYSHARE)
52442f84a899SMel Gorman 			continue;
52452f84a899SMel Gorman 
52462f84a899SMel Gorman 		/*
524704f2cbe3SMel Gorman 		 * Unmap the page from other VMAs without their own reserves.
524804f2cbe3SMel Gorman 		 * They get marked to be SIGKILLed if they fault in these
524904f2cbe3SMel Gorman 		 * areas. This is because a future no-page fault on this VMA
525004f2cbe3SMel Gorman 		 * could insert a zeroed page instead of the data existing
525104f2cbe3SMel Gorman 		 * from the time of fork. This would look like data corruption
525204f2cbe3SMel Gorman 		 * from the time of fork. This would look like data corruption.
525304f2cbe3SMel Gorman 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
525424669e58SAneesh Kumar K.V 			unmap_hugepage_range(iter_vma, address,
525505e90bd0SPeter Xu 					     address + huge_page_size(h), page, 0);
525604f2cbe3SMel Gorman 	}
525783cde9e8SDavidlohr Bueso 	i_mmap_unlock_write(mapping);
525804f2cbe3SMel Gorman }
525904f2cbe3SMel Gorman 
52600fe6e20bSNaoya Horiguchi /*
5261c89357e2SDavid Hildenbrand  * hugetlb_wp() should be called with the page lock of the original hugepage held.
5262aa6d2e8cSBaolin Wang  * Called with hugetlb_fault_mutex_table held and pte_page locked so we
5263ef009b25SMichal Hocko  * cannot race with other handlers or page migration.
5264ef009b25SMichal Hocko  * Keep the pte_same checks anyway to make transition from the mutex easier.
52650fe6e20bSNaoya Horiguchi  */
5266c89357e2SDavid Hildenbrand static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
5267c89357e2SDavid Hildenbrand 		       unsigned long address, pte_t *ptep, unsigned int flags,
5268cb900f41SKirill A. Shutemov 		       struct page *pagecache_page, spinlock_t *ptl)
52691e8f889bSDavid Gibson {
5270c89357e2SDavid Hildenbrand 	const bool unshare = flags & FAULT_FLAG_UNSHARE;
52713999f52eSAneesh Kumar K.V 	pte_t pte;
5272a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
52731e8f889bSDavid Gibson 	struct page *old_page, *new_page;
52742b740303SSouptick Joarder 	int outside_reserve = 0;
52752b740303SSouptick Joarder 	vm_fault_t ret = 0;
5276974e6d66SHuang Ying 	unsigned long haddr = address & huge_page_mask(h);
5277ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
52781e8f889bSDavid Gibson 
5279c89357e2SDavid Hildenbrand 	VM_BUG_ON(unshare && (flags & FOLL_WRITE));
5280c89357e2SDavid Hildenbrand 	VM_BUG_ON(!unshare && !(flags & FOLL_WRITE));
5281c89357e2SDavid Hildenbrand 
52821d8d1464SDavid Hildenbrand 	/*
52831d8d1464SDavid Hildenbrand 	 * hugetlb does not support FOLL_FORCE-style write faults that keep the
52841d8d1464SDavid Hildenbrand 	 * PTE mapped R/O such as maybe_mkwrite() would do.
52851d8d1464SDavid Hildenbrand 	 */
52861d8d1464SDavid Hildenbrand 	if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
52871d8d1464SDavid Hildenbrand 		return VM_FAULT_SIGSEGV;
52881d8d1464SDavid Hildenbrand 
52891d8d1464SDavid Hildenbrand 	/* Let's take out MAP_SHARED mappings first. */
52901d8d1464SDavid Hildenbrand 	if (vma->vm_flags & VM_MAYSHARE) {
52911d8d1464SDavid Hildenbrand 		if (unlikely(unshare))
52921d8d1464SDavid Hildenbrand 			return 0;
52931d8d1464SDavid Hildenbrand 		set_huge_ptep_writable(vma, haddr, ptep);
52941d8d1464SDavid Hildenbrand 		return 0;
52951d8d1464SDavid Hildenbrand 	}
52961d8d1464SDavid Hildenbrand 
52973999f52eSAneesh Kumar K.V 	pte = huge_ptep_get(ptep);
52981e8f889bSDavid Gibson 	old_page = pte_page(pte);
52991e8f889bSDavid Gibson 
5300662ce1dcSYang Yang 	delayacct_wpcopy_start();
5301662ce1dcSYang Yang 
530204f2cbe3SMel Gorman retry_avoidcopy:
5303c89357e2SDavid Hildenbrand 	/*
5304c89357e2SDavid Hildenbrand 	 * If no-one else is actually using this page, we're the exclusive
5305c89357e2SDavid Hildenbrand 	 * owner and can reuse this page.
5306c89357e2SDavid Hildenbrand 	 */
530737a2140dSJoonsoo Kim 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
5308c89357e2SDavid Hildenbrand 		if (!PageAnonExclusive(old_page))
53095a49973dSHugh Dickins 			page_move_anon_rmap(old_page, vma);
5310c89357e2SDavid Hildenbrand 		if (likely(!unshare))
53115b7a1d40SHuang Ying 			set_huge_ptep_writable(vma, haddr, ptep);
5312662ce1dcSYang Yang 
5313662ce1dcSYang Yang 		delayacct_wpcopy_end();
531483c54070SNick Piggin 		return 0;
53151e8f889bSDavid Gibson 	}
53166c287605SDavid Hildenbrand 	VM_BUG_ON_PAGE(PageAnon(old_page) && PageAnonExclusive(old_page),
53176c287605SDavid Hildenbrand 		       old_page);
53181e8f889bSDavid Gibson 
531904f2cbe3SMel Gorman 	/*
532004f2cbe3SMel Gorman 	 * If the process that created a MAP_PRIVATE mapping is about to
532104f2cbe3SMel Gorman 	 * perform a COW due to a shared page count, attempt to satisfy
532204f2cbe3SMel Gorman 	 * the allocation without using the existing reserves. The pagecache
532304f2cbe3SMel Gorman 	 * page is used to determine if the reserve at this address was
532404f2cbe3SMel Gorman 	 * consumed or not. If reserves were used, a partial faulted mapping
532504f2cbe3SMel Gorman 	 * at the time of fork() could consume its reserves on COW instead
532604f2cbe3SMel Gorman 	 * of the full address range.
532704f2cbe3SMel Gorman 	 */
53285944d011SJoonsoo Kim 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
532904f2cbe3SMel Gorman 			old_page != pagecache_page)
533004f2cbe3SMel Gorman 		outside_reserve = 1;
533104f2cbe3SMel Gorman 
533209cbfeafSKirill A. Shutemov 	get_page(old_page);
5333b76c8cfbSLarry Woodman 
5334ad4404a2SDavidlohr Bueso 	/*
5335ad4404a2SDavidlohr Bueso 	 * Drop page table lock as buddy allocator may be called. It will
5336ad4404a2SDavidlohr Bueso 	 * be acquired again before returning to the caller, as expected.
5337ad4404a2SDavidlohr Bueso 	 */
5338cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
53395b7a1d40SHuang Ying 	new_page = alloc_huge_page(vma, haddr, outside_reserve);
53401e8f889bSDavid Gibson 
53412fc39cecSAdam Litke 	if (IS_ERR(new_page)) {
534204f2cbe3SMel Gorman 		/*
534304f2cbe3SMel Gorman 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
534404f2cbe3SMel Gorman 		 * it is due to references held by a child and an insufficient
534504f2cbe3SMel Gorman 		 * huge page pool. To guarantee the original mapper's
534604f2cbe3SMel Gorman 		 * reliability, unmap the page from child processes. The child
534704f2cbe3SMel Gorman 		 * may get SIGKILLed if it later faults.
534804f2cbe3SMel Gorman 		 */
534904f2cbe3SMel Gorman 		if (outside_reserve) {
5350e7dd91c4SMike Kravetz 			struct address_space *mapping = vma->vm_file->f_mapping;
5351e7dd91c4SMike Kravetz 			pgoff_t idx;
5352e7dd91c4SMike Kravetz 			u32 hash;
5353e7dd91c4SMike Kravetz 
535409cbfeafSKirill A. Shutemov 			put_page(old_page);
5355e7dd91c4SMike Kravetz 			/*
5356e7dd91c4SMike Kravetz 			 * Drop hugetlb_fault_mutex and i_mmap_rwsem before
5357e7dd91c4SMike Kravetz 			 * unmapping.  unmapping needs to hold i_mmap_rwsem
5358e7dd91c4SMike Kravetz 			 * in write mode.  Dropping i_mmap_rwsem in read mode
5359e7dd91c4SMike Kravetz 			 * here is OK as COW mappings do not interact with
5360e7dd91c4SMike Kravetz 			 * PMD sharing.
5361e7dd91c4SMike Kravetz 			 *
5362e7dd91c4SMike Kravetz 			 * Reacquire both after unmap operation.
5363e7dd91c4SMike Kravetz 			 */
5364e7dd91c4SMike Kravetz 			idx = vma_hugecache_offset(h, vma, haddr);
5365e7dd91c4SMike Kravetz 			hash = hugetlb_fault_mutex_hash(mapping, idx);
5366e7dd91c4SMike Kravetz 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5367e7dd91c4SMike Kravetz 			i_mmap_unlock_read(mapping);
5368e7dd91c4SMike Kravetz 
53695b7a1d40SHuang Ying 			unmap_ref_private(mm, vma, old_page, haddr);
5370e7dd91c4SMike Kravetz 
5371e7dd91c4SMike Kravetz 			i_mmap_lock_read(mapping);
5372e7dd91c4SMike Kravetz 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
5373cb900f41SKirill A. Shutemov 			spin_lock(ptl);
53745b7a1d40SHuang Ying 			ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5375a9af0c5dSNaoya Horiguchi 			if (likely(ptep &&
5376a9af0c5dSNaoya Horiguchi 				   pte_same(huge_ptep_get(ptep), pte)))
537704f2cbe3SMel Gorman 				goto retry_avoidcopy;
5378a734bcc8SHillf Danton 			/*
5379cb900f41SKirill A. Shutemov 			 * A race occurred while re-acquiring the page
5380cb900f41SKirill A. Shutemov 			 * table lock, and our job is done.
5381a734bcc8SHillf Danton 			 */
5382662ce1dcSYang Yang 			delayacct_wpcopy_end();
5383a734bcc8SHillf Danton 			return 0;
538404f2cbe3SMel Gorman 		}
538504f2cbe3SMel Gorman 
53862b740303SSouptick Joarder 		ret = vmf_error(PTR_ERR(new_page));
5387ad4404a2SDavidlohr Bueso 		goto out_release_old;
53881e8f889bSDavid Gibson 	}
53891e8f889bSDavid Gibson 
53900fe6e20bSNaoya Horiguchi 	/*
53910fe6e20bSNaoya Horiguchi 	 * When the original hugepage is a shared one, it does not have
53920fe6e20bSNaoya Horiguchi 	 * an anon_vma prepared.
53930fe6e20bSNaoya Horiguchi 	 */
539444e2aa93SDean Nelson 	if (unlikely(anon_vma_prepare(vma))) {
5395ad4404a2SDavidlohr Bueso 		ret = VM_FAULT_OOM;
5396ad4404a2SDavidlohr Bueso 		goto out_release_all;
539744e2aa93SDean Nelson 	}
53980fe6e20bSNaoya Horiguchi 
5399974e6d66SHuang Ying 	copy_user_huge_page(new_page, old_page, address, vma,
540047ad8475SAndrea Arcangeli 			    pages_per_huge_page(h));
54010ed361deSNick Piggin 	__SetPageUptodate(new_page);
54021e8f889bSDavid Gibson 
54037269f999SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
54046f4f13e8SJérôme Glisse 				haddr + huge_page_size(h));
5405ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
5406ad4404a2SDavidlohr Bueso 
5407b76c8cfbSLarry Woodman 	/*
5408cb900f41SKirill A. Shutemov 	 * Retake the page table lock to check for racing updates
5409b76c8cfbSLarry Woodman 	 * before the page tables are altered
5410b76c8cfbSLarry Woodman 	 */
5411cb900f41SKirill A. Shutemov 	spin_lock(ptl);
54125b7a1d40SHuang Ying 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5413a9af0c5dSNaoya Horiguchi 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
5414d6995da3SMike Kravetz 		ClearHPageRestoreReserve(new_page);
541507443a85SJoonsoo Kim 
5416c89357e2SDavid Hildenbrand 		/* Break COW or unshare */
54175b7a1d40SHuang Ying 		huge_ptep_clear_flush(vma, haddr, ptep);
5418ac46d4f3SJérôme Glisse 		mmu_notifier_invalidate_range(mm, range.start, range.end);
5419cea86fe2SHugh Dickins 		page_remove_rmap(old_page, vma, true);
54205b7a1d40SHuang Ying 		hugepage_add_new_anon_rmap(new_page, vma, haddr);
54211eba86c0SPasha Tatashin 		set_huge_pte_at(mm, haddr, ptep,
5422c89357e2SDavid Hildenbrand 				make_huge_pte(vma, new_page, !unshare));
54238f251a3dSMike Kravetz 		SetHPageMigratable(new_page);
54241e8f889bSDavid Gibson 		/* Make the old page be freed below */
54251e8f889bSDavid Gibson 		new_page = old_page;
54261e8f889bSDavid Gibson 	}
5427cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
5428ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
5429ad4404a2SDavidlohr Bueso out_release_all:
5430c89357e2SDavid Hildenbrand 	/*
5431c89357e2SDavid Hildenbrand 	 * No restore in case of successful pagetable update (Break COW or
5432c89357e2SDavid Hildenbrand 	 * unshare)
5433c89357e2SDavid Hildenbrand 	 */
5434c7b1850dSMike Kravetz 	if (new_page != old_page)
54355b7a1d40SHuang Ying 		restore_reserve_on_error(h, vma, haddr, new_page);
543609cbfeafSKirill A. Shutemov 	put_page(new_page);
5437ad4404a2SDavidlohr Bueso out_release_old:
543809cbfeafSKirill A. Shutemov 	put_page(old_page);
54398312034fSJoonsoo Kim 
5440ad4404a2SDavidlohr Bueso 	spin_lock(ptl); /* Caller expects lock to be held */
5441662ce1dcSYang Yang 
5442662ce1dcSYang Yang 	delayacct_wpcopy_end();
5443ad4404a2SDavidlohr Bueso 	return ret;
54441e8f889bSDavid Gibson }
54451e8f889bSDavid Gibson 
54463ae77f43SHugh Dickins /*
54473ae77f43SHugh Dickins  * Return whether there is a pagecache page to back the given address within the VMA.
54483ae77f43SHugh Dickins  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
54493ae77f43SHugh Dickins  */
54503ae77f43SHugh Dickins static bool hugetlbfs_pagecache_present(struct hstate *h,
54512a15efc9SHugh Dickins 			struct vm_area_struct *vma, unsigned long address)
54522a15efc9SHugh Dickins {
54532a15efc9SHugh Dickins 	struct address_space *mapping;
54542a15efc9SHugh Dickins 	pgoff_t idx;
54552a15efc9SHugh Dickins 	struct page *page;
54562a15efc9SHugh Dickins 
54572a15efc9SHugh Dickins 	mapping = vma->vm_file->f_mapping;
54582a15efc9SHugh Dickins 	idx = vma_hugecache_offset(h, vma, address);
54592a15efc9SHugh Dickins 
54602a15efc9SHugh Dickins 	page = find_get_page(mapping, idx);
54612a15efc9SHugh Dickins 	if (page)
54622a15efc9SHugh Dickins 		put_page(page);
54632a15efc9SHugh Dickins 	return page != NULL;
54642a15efc9SHugh Dickins }
54652a15efc9SHugh Dickins 
5466ab76ad54SMike Kravetz int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
5467ab76ad54SMike Kravetz 			   pgoff_t idx)
5468ab76ad54SMike Kravetz {
5469d9ef44deSMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(page);
5470ab76ad54SMike Kravetz 	struct inode *inode = mapping->host;
5471ab76ad54SMike Kravetz 	struct hstate *h = hstate_inode(inode);
5472d9ef44deSMatthew Wilcox (Oracle) 	int err;
5473ab76ad54SMike Kravetz 
5474d9ef44deSMatthew Wilcox (Oracle) 	__folio_set_locked(folio);
5475d9ef44deSMatthew Wilcox (Oracle) 	err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
5476d9ef44deSMatthew Wilcox (Oracle) 
5477d9ef44deSMatthew Wilcox (Oracle) 	if (unlikely(err)) {
5478d9ef44deSMatthew Wilcox (Oracle) 		__folio_clear_locked(folio);
5479ab76ad54SMike Kravetz 		return err;
5480d9ef44deSMatthew Wilcox (Oracle) 	}
5481d6995da3SMike Kravetz 	ClearHPageRestoreReserve(page);
5482ab76ad54SMike Kravetz 
548322146c3cSMike Kravetz 	/*
5484d9ef44deSMatthew Wilcox (Oracle) 	 * mark folio dirty so that it will not be removed from cache/file
548522146c3cSMike Kravetz 	 * by non-hugetlbfs specific code paths.
548622146c3cSMike Kravetz 	 */
5487d9ef44deSMatthew Wilcox (Oracle) 	folio_mark_dirty(folio);
548822146c3cSMike Kravetz 
5489ab76ad54SMike Kravetz 	spin_lock(&inode->i_lock);
5490ab76ad54SMike Kravetz 	inode->i_blocks += blocks_per_huge_page(h);
5491ab76ad54SMike Kravetz 	spin_unlock(&inode->i_lock);
5492ab76ad54SMike Kravetz 	return 0;
5493ab76ad54SMike Kravetz }
5494ab76ad54SMike Kravetz 
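/*
 * Callers in this file invoke huge_add_to_page_cache() while holding the
 * per-index hugetlb fault mutex, so two tasks cannot race to insert the
 * same index (see the -EEXIST discussion in hugetlb_no_page() below); each
 * successful insert also charges the inode blocks_per_huge_page(h) blocks.
 */
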
54957677f7fdSAxel Rasmussen static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
54967677f7fdSAxel Rasmussen 						  struct address_space *mapping,
54977677f7fdSAxel Rasmussen 						  pgoff_t idx,
54987677f7fdSAxel Rasmussen 						  unsigned int flags,
54997677f7fdSAxel Rasmussen 						  unsigned long haddr,
5500824ddc60SNadav Amit 						  unsigned long addr,
55017677f7fdSAxel Rasmussen 						  unsigned long reason)
55027677f7fdSAxel Rasmussen {
55037677f7fdSAxel Rasmussen 	vm_fault_t ret;
55047677f7fdSAxel Rasmussen 	u32 hash;
55057677f7fdSAxel Rasmussen 	struct vm_fault vmf = {
55067677f7fdSAxel Rasmussen 		.vma = vma,
55077677f7fdSAxel Rasmussen 		.address = haddr,
5508824ddc60SNadav Amit 		.real_address = addr,
55097677f7fdSAxel Rasmussen 		.flags = flags,
55107677f7fdSAxel Rasmussen 
55117677f7fdSAxel Rasmussen 		/*
55127677f7fdSAxel Rasmussen 		 * Hard to debug if it ends up being
55137677f7fdSAxel Rasmussen 		 * used by a callee that assumes
55147677f7fdSAxel Rasmussen 		 * something about the other
55157677f7fdSAxel Rasmussen 		 * uninitialized fields... same as in
55167677f7fdSAxel Rasmussen 		 * memory.c
55177677f7fdSAxel Rasmussen 		 */
55187677f7fdSAxel Rasmussen 	};
55197677f7fdSAxel Rasmussen 
55207677f7fdSAxel Rasmussen 	/*
55217677f7fdSAxel Rasmussen 	 * hugetlb_fault_mutex and i_mmap_rwsem must be
55227677f7fdSAxel Rasmussen 	 * dropped before handling userfault.  Reacquire
55237677f7fdSAxel Rasmussen 	 * after handling fault to make calling code simpler.
55247677f7fdSAxel Rasmussen 	 */
55257677f7fdSAxel Rasmussen 	hash = hugetlb_fault_mutex_hash(mapping, idx);
55267677f7fdSAxel Rasmussen 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
55277677f7fdSAxel Rasmussen 	i_mmap_unlock_read(mapping);
55287677f7fdSAxel Rasmussen 	ret = handle_userfault(&vmf, reason);
55297677f7fdSAxel Rasmussen 	i_mmap_lock_read(mapping);
55307677f7fdSAxel Rasmussen 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
55317677f7fdSAxel Rasmussen 
55327677f7fdSAxel Rasmussen 	return ret;
55337677f7fdSAxel Rasmussen }
55347677f7fdSAxel Rasmussen 
55352b740303SSouptick Joarder static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
55362b740303SSouptick Joarder 			struct vm_area_struct *vma,
55378382d914SDavidlohr Bueso 			struct address_space *mapping, pgoff_t idx,
5538c64e912cSPeter Xu 			unsigned long address, pte_t *ptep,
5539c64e912cSPeter Xu 			pte_t old_pte, unsigned int flags)
5540ac9b9c66SHugh Dickins {
5541a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
55422b740303SSouptick Joarder 	vm_fault_t ret = VM_FAULT_SIGBUS;
5543409eb8c2SHillf Danton 	int anon_rmap = 0;
55444c887265SAdam Litke 	unsigned long size;
55454c887265SAdam Litke 	struct page *page;
55461e8f889bSDavid Gibson 	pte_t new_pte;
5547cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
5548285b8dcaSHuang Ying 	unsigned long haddr = address & huge_page_mask(h);
5549c7b1850dSMike Kravetz 	bool new_page, new_pagecache_page = false;
55504c887265SAdam Litke 
555104f2cbe3SMel Gorman 	/*
555204f2cbe3SMel Gorman 	 * Currently, we are forced to kill the process in the event the
555304f2cbe3SMel Gorman 	 * original mapper has unmapped pages from the child due to a failed
5554c89357e2SDavid Hildenbrand 	 * COW/unsharing. Warn that such a situation has occurred as it may not
5555c89357e2SDavid Hildenbrand 	 * be obvious.
555604f2cbe3SMel Gorman 	 */
555704f2cbe3SMel Gorman 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
5558910154d5SGeoffrey Thomas 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
555904f2cbe3SMel Gorman 			   current->pid);
556004f2cbe3SMel Gorman 		return ret;
556104f2cbe3SMel Gorman 	}
556204f2cbe3SMel Gorman 
55634c887265SAdam Litke 	/*
556487bf91d3SMike Kravetz 	 * We cannot race with truncation due to holding i_mmap_rwsem.
556587bf91d3SMike Kravetz 	 * i_size is modified when holding i_mmap_rwsem, so check here
556687bf91d3SMike Kravetz 	 * once for faults beyond end of file.
55674c887265SAdam Litke 	 */
5568a5516438SAndi Kleen 	size = i_size_read(mapping->host) >> huge_page_shift(h);
5569ebed4bfcSHugh Dickins 	if (idx >= size)
5570ebed4bfcSHugh Dickins 		goto out;
55711a1aad8aSMike Kravetz 
5572c7b1850dSMike Kravetz 	new_page = false;
557387bf91d3SMike Kravetz 	page = find_lock_page(mapping, idx);
557487bf91d3SMike Kravetz 	if (!page) {
55757677f7fdSAxel Rasmussen 		/* Check for page in userfault range */
55761a1aad8aSMike Kravetz 		if (userfaultfd_missing(vma)) {
55777677f7fdSAxel Rasmussen 			ret = hugetlb_handle_userfault(vma, mapping, idx,
5578824ddc60SNadav Amit 						       flags, haddr, address,
55797677f7fdSAxel Rasmussen 						       VM_UFFD_MISSING);
55801a1aad8aSMike Kravetz 			goto out;
55811a1aad8aSMike Kravetz 		}
55821a1aad8aSMike Kravetz 
5583285b8dcaSHuang Ying 		page = alloc_huge_page(vma, haddr, 0);
55842fc39cecSAdam Litke 		if (IS_ERR(page)) {
55854643d67eSMike Kravetz 			/*
55864643d67eSMike Kravetz 			 * Returning error will result in faulting task being
55874643d67eSMike Kravetz 			 * sent SIGBUS.  The hugetlb fault mutex prevents two
55884643d67eSMike Kravetz 			 * tasks from racing to fault in the same page which
55894643d67eSMike Kravetz 			 * could result in false unable to allocate errors.
55904643d67eSMike Kravetz 			 * Page migration does not take the fault mutex, but
55914643d67eSMike Kravetz 			 * does a clear then write of pte's under page table
55924643d67eSMike Kravetz 			 * lock.  Page fault code could race with migration,
55934643d67eSMike Kravetz 			 * notice the clear pte and try to allocate a page
55944643d67eSMike Kravetz 			 * here.  Before returning error, get ptl and make
55954643d67eSMike Kravetz 			 * sure there really is no pte entry.
55964643d67eSMike Kravetz 			 */
55974643d67eSMike Kravetz 			ptl = huge_pte_lock(h, mm, ptep);
55984643d67eSMike Kravetz 			ret = 0;
5599d83e6c8aSMiaohe Lin 			if (huge_pte_none(huge_ptep_get(ptep)))
56002b740303SSouptick Joarder 				ret = vmf_error(PTR_ERR(page));
5601d83e6c8aSMiaohe Lin 			spin_unlock(ptl);
56026bda666aSChristoph Lameter 			goto out;
56036bda666aSChristoph Lameter 		}
560447ad8475SAndrea Arcangeli 		clear_huge_page(page, address, pages_per_huge_page(h));
56050ed361deSNick Piggin 		__SetPageUptodate(page);
5606cb6acd01SMike Kravetz 		new_page = true;
5607ac9b9c66SHugh Dickins 
5608f83a275dSMel Gorman 		if (vma->vm_flags & VM_MAYSHARE) {
5609ab76ad54SMike Kravetz 			int err = huge_add_to_page_cache(page, mapping, idx);
56106bda666aSChristoph Lameter 			if (err) {
56113a5497a2SMiaohe Lin 				/*
56123a5497a2SMiaohe Lin 				 * err can't be -EEXIST, which would imply someone
56133a5497a2SMiaohe Lin 				 * else consumed the reservation, since the hugetlb
56143a5497a2SMiaohe Lin 				 * fault mutex is held when adding a hugetlb page
56153a5497a2SMiaohe Lin 				 * to the page cache. So it's safe to call
56163a5497a2SMiaohe Lin 				 * restore_reserve_on_error() here.
56173a5497a2SMiaohe Lin 				 */
56183a5497a2SMiaohe Lin 				restore_reserve_on_error(h, vma, haddr, page);
56196bda666aSChristoph Lameter 				put_page(page);
56206bda666aSChristoph Lameter 				goto out;
56216bda666aSChristoph Lameter 			}
5622c7b1850dSMike Kravetz 			new_pagecache_page = true;
562323be7468SMel Gorman 		} else {
56246bda666aSChristoph Lameter 			lock_page(page);
56250fe6e20bSNaoya Horiguchi 			if (unlikely(anon_vma_prepare(vma))) {
56260fe6e20bSNaoya Horiguchi 				ret = VM_FAULT_OOM;
56270fe6e20bSNaoya Horiguchi 				goto backout_unlocked;
562823be7468SMel Gorman 			}
5629409eb8c2SHillf Danton 			anon_rmap = 1;
56300fe6e20bSNaoya Horiguchi 		}
56310fe6e20bSNaoya Horiguchi 	} else {
563257303d80SAndy Whitcroft 		/*
5633998b4382SNaoya Horiguchi 		 * If a memory error occurs between mmap() and fault, some processes
5634998b4382SNaoya Horiguchi 		 * don't have a hwpoisoned swap entry for the errored virtual address.
5635998b4382SNaoya Horiguchi 		 * So we need to block the hugepage fault with a PG_hwpoison bit check.
5636fd6a03edSNaoya Horiguchi 		 */
5637fd6a03edSNaoya Horiguchi 		if (unlikely(PageHWPoison(page))) {
56380eb98f15SMiaohe Lin 			ret = VM_FAULT_HWPOISON_LARGE |
5639972dc4deSAneesh Kumar K.V 				VM_FAULT_SET_HINDEX(hstate_index(h));
5640fd6a03edSNaoya Horiguchi 			goto backout_unlocked;
56416bda666aSChristoph Lameter 		}
56427677f7fdSAxel Rasmussen 
56437677f7fdSAxel Rasmussen 		/* Check for page in userfault range. */
56447677f7fdSAxel Rasmussen 		if (userfaultfd_minor(vma)) {
56457677f7fdSAxel Rasmussen 			unlock_page(page);
56467677f7fdSAxel Rasmussen 			put_page(page);
56477677f7fdSAxel Rasmussen 			ret = hugetlb_handle_userfault(vma, mapping, idx,
5648824ddc60SNadav Amit 						       flags, haddr, address,
56497677f7fdSAxel Rasmussen 						       VM_UFFD_MINOR);
56507677f7fdSAxel Rasmussen 			goto out;
56517677f7fdSAxel Rasmussen 		}
5652998b4382SNaoya Horiguchi 	}
56531e8f889bSDavid Gibson 
565457303d80SAndy Whitcroft 	/*
565557303d80SAndy Whitcroft 	 * If we are going to COW a private mapping later, we examine the
565657303d80SAndy Whitcroft 	 * pending reservations for this page now. This will ensure that
565757303d80SAndy Whitcroft 	 * any allocations necessary to record that reservation occur outside
565857303d80SAndy Whitcroft 	 * the spinlock.
565957303d80SAndy Whitcroft 	 */
56605e911373SMike Kravetz 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5661285b8dcaSHuang Ying 		if (vma_needs_reservation(h, vma, haddr) < 0) {
56622b26736cSAndy Whitcroft 			ret = VM_FAULT_OOM;
56632b26736cSAndy Whitcroft 			goto backout_unlocked;
56642b26736cSAndy Whitcroft 		}
56655e911373SMike Kravetz 		/* Just decrements count, does not deallocate */
5666285b8dcaSHuang Ying 		vma_end_reservation(h, vma, haddr);
56675e911373SMike Kravetz 	}
566857303d80SAndy Whitcroft 
56698bea8052SAneesh Kumar K.V 	ptl = huge_pte_lock(h, mm, ptep);
567083c54070SNick Piggin 	ret = 0;
5671c64e912cSPeter Xu 	/* If pte changed from under us, retry */
5672c64e912cSPeter Xu 	if (!pte_same(huge_ptep_get(ptep), old_pte))
56734c887265SAdam Litke 		goto backout;
56744c887265SAdam Litke 
567507443a85SJoonsoo Kim 	if (anon_rmap) {
5676d6995da3SMike Kravetz 		ClearHPageRestoreReserve(page);
5677285b8dcaSHuang Ying 		hugepage_add_new_anon_rmap(page, vma, haddr);
5678ac714904SChoi Gi-yong 	} else
5679fb3d824dSDavid Hildenbrand 		page_dup_file_rmap(page, true);
56801e8f889bSDavid Gibson 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
56811e8f889bSDavid Gibson 				&& (vma->vm_flags & VM_SHARED)));
5682c64e912cSPeter Xu 	/*
5683c64e912cSPeter Xu 	 * If this pte was previously wr-protected, keep it wr-protected even
5684c64e912cSPeter Xu 	 * if populated.
5685c64e912cSPeter Xu 	 */
5686c64e912cSPeter Xu 	if (unlikely(pte_marker_uffd_wp(old_pte)))
5687c64e912cSPeter Xu 		new_pte = huge_pte_wrprotect(huge_pte_mkuffd_wp(new_pte));
5688285b8dcaSHuang Ying 	set_huge_pte_at(mm, haddr, ptep, new_pte);
56891e8f889bSDavid Gibson 
56905d317b2bSNaoya Horiguchi 	hugetlb_count_add(pages_per_huge_page(h), mm);
5691788c7df4SHugh Dickins 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
56921e8f889bSDavid Gibson 		/* Optimization, do the COW without a second fault */
5693c89357e2SDavid Hildenbrand 		ret = hugetlb_wp(mm, vma, address, ptep, flags, page, ptl);
56941e8f889bSDavid Gibson 	}
56951e8f889bSDavid Gibson 
5696cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
5697cb6acd01SMike Kravetz 
5698cb6acd01SMike Kravetz 	/*
56998f251a3dSMike Kravetz 	 * Only set HPageMigratable in newly allocated pages.  Existing pages
57008f251a3dSMike Kravetz 	 * found in the pagecache may not have HPageMigratable set if they have
57018f251a3dSMike Kravetz 	 * been isolated for migration.
5702cb6acd01SMike Kravetz 	 */
5703cb6acd01SMike Kravetz 	if (new_page)
57048f251a3dSMike Kravetz 		SetHPageMigratable(page);
5705cb6acd01SMike Kravetz 
57064c887265SAdam Litke 	unlock_page(page);
57074c887265SAdam Litke out:
5708ac9b9c66SHugh Dickins 	return ret;
57094c887265SAdam Litke 
57104c887265SAdam Litke backout:
5711cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
57122b26736cSAndy Whitcroft backout_unlocked:
57134c887265SAdam Litke 	unlock_page(page);
5714c7b1850dSMike Kravetz 	/* restore reserve for newly allocated pages not in page cache */
5715c7b1850dSMike Kravetz 	if (new_page && !new_pagecache_page)
5716285b8dcaSHuang Ying 		restore_reserve_on_error(h, vma, haddr, page);
57174c887265SAdam Litke 	put_page(page);
57184c887265SAdam Litke 	goto out;
5719ac9b9c66SHugh Dickins }
5720ac9b9c66SHugh Dickins 
57218382d914SDavidlohr Bueso #ifdef CONFIG_SMP
5722188b04a7SWei Yang u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
57238382d914SDavidlohr Bueso {
57248382d914SDavidlohr Bueso 	unsigned long key[2];
57258382d914SDavidlohr Bueso 	u32 hash;
57268382d914SDavidlohr Bueso 
57278382d914SDavidlohr Bueso 	key[0] = (unsigned long) mapping;
57288382d914SDavidlohr Bueso 	key[1] = idx;
57298382d914SDavidlohr Bueso 
573055254636SMike Kravetz 	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
57318382d914SDavidlohr Bueso 
57328382d914SDavidlohr Bueso 	return hash & (num_fault_mutexes - 1);
57338382d914SDavidlohr Bueso }
57348382d914SDavidlohr Bueso #else
57358382d914SDavidlohr Bueso /*
57366c26d310SMiaohe Lin  * For uniprocessor systems we always use a single mutex, so just
57378382d914SDavidlohr Bueso  * return 0 and avoid the hashing overhead.
57388382d914SDavidlohr Bueso  */
5739188b04a7SWei Yang u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
57408382d914SDavidlohr Bueso {
57418382d914SDavidlohr Bueso 	return 0;
57428382d914SDavidlohr Bueso }
57438382d914SDavidlohr Bueso #endif
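
/*
 * A minimal usage sketch, mirroring what hugetlb_fault() below does, of how
 * this hash serializes faults on one (mapping, index) pair:
 *
 *	idx = vma_hugecache_offset(h, vma, haddr);
 *	hash = hugetlb_fault_mutex_hash(mapping, idx);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... handle the fault ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 *
 * Distinct (mapping, index) pairs may collide on the same mutex; that only
 * adds serialization, it never affects correctness.
 */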
57448382d914SDavidlohr Bueso 
57452b740303SSouptick Joarder vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
5746788c7df4SHugh Dickins 			unsigned long address, unsigned int flags)
574786e5216fSAdam Litke {
57488382d914SDavidlohr Bueso 	pte_t *ptep, entry;
5749cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
57502b740303SSouptick Joarder 	vm_fault_t ret;
57518382d914SDavidlohr Bueso 	u32 hash;
57528382d914SDavidlohr Bueso 	pgoff_t idx;
57530fe6e20bSNaoya Horiguchi 	struct page *page = NULL;
575457303d80SAndy Whitcroft 	struct page *pagecache_page = NULL;
5755a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
57568382d914SDavidlohr Bueso 	struct address_space *mapping;
57570f792cf9SNaoya Horiguchi 	int need_wait_lock = 0;
5758285b8dcaSHuang Ying 	unsigned long haddr = address & huge_page_mask(h);
575986e5216fSAdam Litke 
5760285b8dcaSHuang Ying 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5761fd6a03edSNaoya Horiguchi 	if (ptep) {
5762c0d0381aSMike Kravetz 		/*
5763c0d0381aSMike Kravetz 		 * Since we hold no locks, ptep could be stale.  That is
5764c0d0381aSMike Kravetz 		 * OK as we are only making decisions based on content and
5765c0d0381aSMike Kravetz 		 * not actually modifying content here.
5766c0d0381aSMike Kravetz 		 */
5767fd6a03edSNaoya Horiguchi 		entry = huge_ptep_get(ptep);
5768290408d4SNaoya Horiguchi 		if (unlikely(is_hugetlb_entry_migration(entry))) {
5769ad1ac596SMiaohe Lin 			migration_entry_wait_huge(vma, ptep);
5770290408d4SNaoya Horiguchi 			return 0;
5771290408d4SNaoya Horiguchi 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
5772aa50d3a7SAndi Kleen 			return VM_FAULT_HWPOISON_LARGE |
5773972dc4deSAneesh Kumar K.V 				VM_FAULT_SET_HINDEX(hstate_index(h));
5774b43a9990SMike Kravetz 	}
57758382d914SDavidlohr Bueso 
5776c0d0381aSMike Kravetz 	/*
5777c0d0381aSMike Kravetz 	 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
577887bf91d3SMike Kravetz 	 * until finished with ptep.  This serves two purposes:
577987bf91d3SMike Kravetz 	 * 1) It prevents huge_pmd_unshare from being called elsewhere
578087bf91d3SMike Kravetz 	 *    and making the ptep no longer valid.
578187bf91d3SMike Kravetz 	 * 2) It synchronizes us with i_size modifications during truncation.
5782c0d0381aSMike Kravetz 	 *
5783c0d0381aSMike Kravetz 	 * ptep could have already been assigned via huge_pte_offset.  That
5784c0d0381aSMike Kravetz 	 * is OK, as huge_pte_alloc will return the same value unless
5785c0d0381aSMike Kravetz 	 * something has changed.
5786c0d0381aSMike Kravetz 	 */
5787ddeaab32SMike Kravetz 	mapping = vma->vm_file->f_mapping;
5788c0d0381aSMike Kravetz 	i_mmap_lock_read(mapping);
5789aec44e0fSPeter Xu 	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
5790c0d0381aSMike Kravetz 	if (!ptep) {
5791c0d0381aSMike Kravetz 		i_mmap_unlock_read(mapping);
5792c0d0381aSMike Kravetz 		return VM_FAULT_OOM;
5793c0d0381aSMike Kravetz 	}
5794ddeaab32SMike Kravetz 
57953935baa9SDavid Gibson 	/*
57963935baa9SDavid Gibson 	 * Serialize hugepage allocation and instantiation, so that we don't
57973935baa9SDavid Gibson 	 * get spurious allocation failures if two CPUs race to instantiate
57983935baa9SDavid Gibson 	 * the same page in the page cache.
57993935baa9SDavid Gibson 	 */
5800c0d0381aSMike Kravetz 	idx = vma_hugecache_offset(h, vma, haddr);
5801188b04a7SWei Yang 	hash = hugetlb_fault_mutex_hash(mapping, idx);
5802c672c7f2SMike Kravetz 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
58038382d914SDavidlohr Bueso 
58047f2e9525SGerald Schaefer 	entry = huge_ptep_get(ptep);
5805c64e912cSPeter Xu 	/* PTE markers should be handled the same way as none pte */
5806c64e912cSPeter Xu 	if (huge_pte_none_mostly(entry)) {
5807c64e912cSPeter Xu 		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
5808c64e912cSPeter Xu 				      entry, flags);
5809b4d1d99fSDavid Gibson 		goto out_mutex;
58103935baa9SDavid Gibson 	}
581186e5216fSAdam Litke 
581283c54070SNick Piggin 	ret = 0;
58131e8f889bSDavid Gibson 
581457303d80SAndy Whitcroft 	/*
58150f792cf9SNaoya Horiguchi 	 * entry could be a migration/hwpoison entry at this point, so this
58160f792cf9SNaoya Horiguchi 	 * check prevents the kernel from proceeding below on the assumption
58177c8de358SEthon Paul 	 * that we have an active hugepage in the pagecache. This goto expects
58187c8de358SEthon Paul 	 * the 2nd page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
58197c8de358SEthon Paul 	 * check will handle it properly.
58200f792cf9SNaoya Horiguchi 	 */
58210f792cf9SNaoya Horiguchi 	if (!pte_present(entry))
58220f792cf9SNaoya Horiguchi 		goto out_mutex;
58230f792cf9SNaoya Horiguchi 
58240f792cf9SNaoya Horiguchi 	/*
5825c89357e2SDavid Hildenbrand 	 * If we are going to COW/unshare the mapping later, we examine the
5826c89357e2SDavid Hildenbrand 	 * pending reservations for this page now. This will ensure that any
582757303d80SAndy Whitcroft 	 * allocations necessary to record that reservation occur outside the
58281d8d1464SDavid Hildenbrand 	 * spinlock. Also lookup the pagecache page now as it is used to
58291d8d1464SDavid Hildenbrand 	 * determine if a reservation has been consumed.
583057303d80SAndy Whitcroft 	 */
5831c89357e2SDavid Hildenbrand 	if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
58321d8d1464SDavid Hildenbrand 	    !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) {
5833285b8dcaSHuang Ying 		if (vma_needs_reservation(h, vma, haddr) < 0) {
58342b26736cSAndy Whitcroft 			ret = VM_FAULT_OOM;
5835b4d1d99fSDavid Gibson 			goto out_mutex;
58362b26736cSAndy Whitcroft 		}
58375e911373SMike Kravetz 		/* Just decrements count, does not deallocate */
5838285b8dcaSHuang Ying 		vma_end_reservation(h, vma, haddr);
583957303d80SAndy Whitcroft 
584029be8426SMiaohe Lin 		pagecache_page = find_lock_page(mapping, idx);
584157303d80SAndy Whitcroft 	}
584257303d80SAndy Whitcroft 
58430f792cf9SNaoya Horiguchi 	ptl = huge_pte_lock(h, mm, ptep);
58440fe6e20bSNaoya Horiguchi 
5845c89357e2SDavid Hildenbrand 	/* Check for a racing update before calling hugetlb_wp() */
5846b4d1d99fSDavid Gibson 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
5847cb900f41SKirill A. Shutemov 		goto out_ptl;
5848b4d1d99fSDavid Gibson 
5849166f3eccSPeter Xu 	/* Handle userfault-wp first, before trying to lock more pages */
5850166f3eccSPeter Xu 	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
5851166f3eccSPeter Xu 	    (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
5852166f3eccSPeter Xu 		struct vm_fault vmf = {
5853166f3eccSPeter Xu 			.vma = vma,
5854166f3eccSPeter Xu 			.address = haddr,
5855166f3eccSPeter Xu 			.real_address = address,
5856166f3eccSPeter Xu 			.flags = flags,
5857166f3eccSPeter Xu 		};
5858166f3eccSPeter Xu 
5859166f3eccSPeter Xu 		spin_unlock(ptl);
5860166f3eccSPeter Xu 		if (pagecache_page) {
5861166f3eccSPeter Xu 			unlock_page(pagecache_page);
5862166f3eccSPeter Xu 			put_page(pagecache_page);
5863166f3eccSPeter Xu 		}
5864166f3eccSPeter Xu 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5865166f3eccSPeter Xu 		i_mmap_unlock_read(mapping);
5866166f3eccSPeter Xu 		return handle_userfault(&vmf, VM_UFFD_WP);
5867166f3eccSPeter Xu 	}
5868166f3eccSPeter Xu 
58690f792cf9SNaoya Horiguchi 	/*
5870c89357e2SDavid Hildenbrand 	 * hugetlb_wp() requires page locks of pte_page(entry) and
58710f792cf9SNaoya Horiguchi 	 * pagecache_page, so here we need take the former one
58720f792cf9SNaoya Horiguchi 	 * when page != pagecache_page or !pagecache_page.
58730f792cf9SNaoya Horiguchi 	 */
58740f792cf9SNaoya Horiguchi 	page = pte_page(entry);
58750f792cf9SNaoya Horiguchi 	if (page != pagecache_page)
58760f792cf9SNaoya Horiguchi 		if (!trylock_page(page)) {
58770f792cf9SNaoya Horiguchi 			need_wait_lock = 1;
58780f792cf9SNaoya Horiguchi 			goto out_ptl;
58790f792cf9SNaoya Horiguchi 		}
58800f792cf9SNaoya Horiguchi 
58810f792cf9SNaoya Horiguchi 	get_page(page);
5882b4d1d99fSDavid Gibson 
5883c89357e2SDavid Hildenbrand 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
5884106c992aSGerald Schaefer 		if (!huge_pte_write(entry)) {
5885c89357e2SDavid Hildenbrand 			ret = hugetlb_wp(mm, vma, address, ptep, flags,
5886cb900f41SKirill A. Shutemov 					 pagecache_page, ptl);
58870f792cf9SNaoya Horiguchi 			goto out_put_page;
5888c89357e2SDavid Hildenbrand 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
5889106c992aSGerald Schaefer 			entry = huge_pte_mkdirty(entry);
5890b4d1d99fSDavid Gibson 		}
5891c89357e2SDavid Hildenbrand 	}
5892b4d1d99fSDavid Gibson 	entry = pte_mkyoung(entry);
5893285b8dcaSHuang Ying 	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
5894788c7df4SHugh Dickins 						flags & FAULT_FLAG_WRITE))
5895285b8dcaSHuang Ying 		update_mmu_cache(vma, haddr, ptep);
58960f792cf9SNaoya Horiguchi out_put_page:
58970f792cf9SNaoya Horiguchi 	if (page != pagecache_page)
58980f792cf9SNaoya Horiguchi 		unlock_page(page);
58990f792cf9SNaoya Horiguchi 	put_page(page);
5900cb900f41SKirill A. Shutemov out_ptl:
5901cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
590257303d80SAndy Whitcroft 
590357303d80SAndy Whitcroft 	if (pagecache_page) {
590457303d80SAndy Whitcroft 		unlock_page(pagecache_page);
590557303d80SAndy Whitcroft 		put_page(pagecache_page);
590657303d80SAndy Whitcroft 	}
5907b4d1d99fSDavid Gibson out_mutex:
5908c672c7f2SMike Kravetz 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5909c0d0381aSMike Kravetz 	i_mmap_unlock_read(mapping);
59100f792cf9SNaoya Horiguchi 	/*
59110f792cf9SNaoya Horiguchi 	 * Generally it's safe to hold a refcount while waiting for a page lock.
59120f792cf9SNaoya Horiguchi 	 * But here we only wait to defer the next page fault and avoid a busy
59130f792cf9SNaoya Horiguchi 	 * loop; the page is not used after it is unlocked before returning from
59140f792cf9SNaoya Horiguchi 	 * the current page fault. So we are safe from accessing a freed page,
59150f792cf9SNaoya Horiguchi 	 * even if we wait here without taking a refcount.
59160f792cf9SNaoya Horiguchi 	 */
59170f792cf9SNaoya Horiguchi 	if (need_wait_lock)
59180f792cf9SNaoya Horiguchi 		wait_on_page_locked(page);
59191e8f889bSDavid Gibson 	return ret;
592086e5216fSAdam Litke }
592186e5216fSAdam Litke 
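/*
 * As a reading aid, derived only from hugetlb_fault() above: the fault path
 * takes i_mmap_rwsem in read mode, then the per-hash fault mutex, then
 * (optionally) the pagecache page lock, then the page table lock, and
 * finally trylocks pte_page() when it differs from the pagecache page.  The
 * userfault-wp and error paths unwind those locks in reverse before
 * returning.
 */
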
5922714c1891SAxel Rasmussen #ifdef CONFIG_USERFAULTFD
59238fb5debcSMike Kravetz /*
59248fb5debcSMike Kravetz  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
59258fb5debcSMike Kravetz  * modifications for huge pages.
59268fb5debcSMike Kravetz  */
59278fb5debcSMike Kravetz int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
59288fb5debcSMike Kravetz 			    pte_t *dst_pte,
59298fb5debcSMike Kravetz 			    struct vm_area_struct *dst_vma,
59308fb5debcSMike Kravetz 			    unsigned long dst_addr,
59318fb5debcSMike Kravetz 			    unsigned long src_addr,
5932f6191471SAxel Rasmussen 			    enum mcopy_atomic_mode mode,
59336041c691SPeter Xu 			    struct page **pagep,
59346041c691SPeter Xu 			    bool wp_copy)
59358fb5debcSMike Kravetz {
5936f6191471SAxel Rasmussen 	bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
59378cc5fcbbSMina Almasry 	struct hstate *h = hstate_vma(dst_vma);
59388cc5fcbbSMina Almasry 	struct address_space *mapping = dst_vma->vm_file->f_mapping;
59398cc5fcbbSMina Almasry 	pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
59401e392147SAndrea Arcangeli 	unsigned long size;
59411c9e8defSMike Kravetz 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
59428fb5debcSMike Kravetz 	pte_t _dst_pte;
59438fb5debcSMike Kravetz 	spinlock_t *ptl;
59448cc5fcbbSMina Almasry 	int ret = -ENOMEM;
59458fb5debcSMike Kravetz 	struct page *page;
5946f6191471SAxel Rasmussen 	int writable;
5947cc30042dSMina Almasry 	bool page_in_pagecache = false;
59488fb5debcSMike Kravetz 
5949f6191471SAxel Rasmussen 	if (is_continue) {
5950f6191471SAxel Rasmussen 		ret = -EFAULT;
5951f6191471SAxel Rasmussen 		page = find_lock_page(mapping, idx);
5952f6191471SAxel Rasmussen 		if (!page)
5953f6191471SAxel Rasmussen 			goto out;
5954cc30042dSMina Almasry 		page_in_pagecache = true;
5955f6191471SAxel Rasmussen 	} else if (!*pagep) {
5956d84cf06eSMina Almasry 		/* If a page already exists, then it's UFFDIO_COPY for
5957d84cf06eSMina Almasry 		 * a non-missing case. Return -EEXIST.
5958d84cf06eSMina Almasry 		 */
5959d84cf06eSMina Almasry 		if (vm_shared &&
5960d84cf06eSMina Almasry 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
5961d84cf06eSMina Almasry 			ret = -EEXIST;
59628fb5debcSMike Kravetz 			goto out;
5963d84cf06eSMina Almasry 		}
5964d84cf06eSMina Almasry 
5965d84cf06eSMina Almasry 		page = alloc_huge_page(dst_vma, dst_addr, 0);
5966d84cf06eSMina Almasry 		if (IS_ERR(page)) {
5967d84cf06eSMina Almasry 			ret = -ENOMEM;
5968d84cf06eSMina Almasry 			goto out;
5969d84cf06eSMina Almasry 		}
59708fb5debcSMike Kravetz 
59718fb5debcSMike Kravetz 		ret = copy_huge_page_from_user(page,
59728fb5debcSMike Kravetz 						(const void __user *) src_addr,
5973810a56b9SMike Kravetz 						pages_per_huge_page(h), false);
59748fb5debcSMike Kravetz 
5975c1e8d7c6SMichel Lespinasse 		/* fallback to copy_from_user outside mmap_lock */
59768fb5debcSMike Kravetz 		if (unlikely(ret)) {
59779e368259SAndrea Arcangeli 			ret = -ENOENT;
59788cc5fcbbSMina Almasry 			/* Free the allocated page which may have
59798cc5fcbbSMina Almasry 			 * consumed a reservation.
59808cc5fcbbSMina Almasry 			 */
59818cc5fcbbSMina Almasry 			restore_reserve_on_error(h, dst_vma, dst_addr, page);
59828cc5fcbbSMina Almasry 			put_page(page);
59838cc5fcbbSMina Almasry 
59848cc5fcbbSMina Almasry 			/* Allocate a temporary page to hold the copied
59858cc5fcbbSMina Almasry 			 * contents.
59868cc5fcbbSMina Almasry 			 */
59878cc5fcbbSMina Almasry 			page = alloc_huge_page_vma(h, dst_vma, dst_addr);
59888cc5fcbbSMina Almasry 			if (!page) {
59898cc5fcbbSMina Almasry 				ret = -ENOMEM;
59908cc5fcbbSMina Almasry 				goto out;
59918cc5fcbbSMina Almasry 			}
59928fb5debcSMike Kravetz 			*pagep = page;
59938cc5fcbbSMina Almasry 			/* Set the outparam pagep and return to the caller to
59948cc5fcbbSMina Almasry 			 * copy the contents outside the lock. Don't free the
59958cc5fcbbSMina Almasry 			 * page.
59968cc5fcbbSMina Almasry 			 */
59978fb5debcSMike Kravetz 			goto out;
59988fb5debcSMike Kravetz 		}
59998fb5debcSMike Kravetz 	} else {
60008cc5fcbbSMina Almasry 		if (vm_shared &&
60018cc5fcbbSMina Almasry 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
60028cc5fcbbSMina Almasry 			put_page(*pagep);
60038cc5fcbbSMina Almasry 			ret = -EEXIST;
60048cc5fcbbSMina Almasry 			*pagep = NULL;
60058cc5fcbbSMina Almasry 			goto out;
60068cc5fcbbSMina Almasry 		}
60078cc5fcbbSMina Almasry 
60088cc5fcbbSMina Almasry 		page = alloc_huge_page(dst_vma, dst_addr, 0);
60098cc5fcbbSMina Almasry 		if (IS_ERR(page)) {
6010da9a298fSMiaohe Lin 			put_page(*pagep);
60118cc5fcbbSMina Almasry 			ret = -ENOMEM;
60128cc5fcbbSMina Almasry 			*pagep = NULL;
60138cc5fcbbSMina Almasry 			goto out;
60148cc5fcbbSMina Almasry 		}
601534892366SMuchun Song 		copy_user_huge_page(page, *pagep, dst_addr, dst_vma,
601634892366SMuchun Song 				    pages_per_huge_page(h));
60178cc5fcbbSMina Almasry 		put_page(*pagep);
60188fb5debcSMike Kravetz 		*pagep = NULL;
60198fb5debcSMike Kravetz 	}
60208fb5debcSMike Kravetz 
60218fb5debcSMike Kravetz 	/*
60228fb5debcSMike Kravetz 	 * The memory barrier inside __SetPageUptodate makes sure that
60238fb5debcSMike Kravetz 	 * preceding stores to the page contents become visible before
60248fb5debcSMike Kravetz 	 * the set_pte_at() write.
60258fb5debcSMike Kravetz 	 */
60268fb5debcSMike Kravetz 	__SetPageUptodate(page);
60278fb5debcSMike Kravetz 
6028f6191471SAxel Rasmussen 	/* Add shared, newly allocated pages to the page cache. */
6029f6191471SAxel Rasmussen 	if (vm_shared && !is_continue) {
60301e392147SAndrea Arcangeli 		size = i_size_read(mapping->host) >> huge_page_shift(h);
60311e392147SAndrea Arcangeli 		ret = -EFAULT;
60321e392147SAndrea Arcangeli 		if (idx >= size)
60331e392147SAndrea Arcangeli 			goto out_release_nounlock;
60341c9e8defSMike Kravetz 
60351e392147SAndrea Arcangeli 		/*
60361e392147SAndrea Arcangeli 		 * Serialization between remove_inode_hugepages() and
60371e392147SAndrea Arcangeli 		 * huge_add_to_page_cache() below happens through the
60381e392147SAndrea Arcangeli 		 * hugetlb_fault_mutex_table that must be held here by
60391e392147SAndrea Arcangeli 		 * the caller.
60401e392147SAndrea Arcangeli 		 */
60411c9e8defSMike Kravetz 		ret = huge_add_to_page_cache(page, mapping, idx);
60421c9e8defSMike Kravetz 		if (ret)
60431c9e8defSMike Kravetz 			goto out_release_nounlock;
6044cc30042dSMina Almasry 		page_in_pagecache = true;
60451c9e8defSMike Kravetz 	}
60461c9e8defSMike Kravetz 
6047bcc66543SMiaohe Lin 	ptl = huge_pte_lock(h, dst_mm, dst_pte);
60488fb5debcSMike Kravetz 
60491e392147SAndrea Arcangeli 	/*
60501e392147SAndrea Arcangeli 	 * Recheck the i_size after holding PT lock to make sure not
60511e392147SAndrea Arcangeli 	 * to leave any page mapped (as page_mapped()) beyond the end
60521e392147SAndrea Arcangeli 	 * of the i_size (remove_inode_hugepages() is strict about
60531e392147SAndrea Arcangeli 	 * enforcing that). If we bail out here, we'll also leave a
60541e392147SAndrea Arcangeli 	 * page in the radix tree in the vm_shared case beyond the end
60551e392147SAndrea Arcangeli 	 * of the i_size, but remove_inode_hugepages() will take care
60561e392147SAndrea Arcangeli 	 * of it as soon as we drop the hugetlb_fault_mutex_table.
60571e392147SAndrea Arcangeli 	 */
60581e392147SAndrea Arcangeli 	size = i_size_read(mapping->host) >> huge_page_shift(h);
60591e392147SAndrea Arcangeli 	ret = -EFAULT;
60601e392147SAndrea Arcangeli 	if (idx >= size)
60611e392147SAndrea Arcangeli 		goto out_release_unlock;
60621e392147SAndrea Arcangeli 
60638fb5debcSMike Kravetz 	ret = -EEXIST;
60646041c691SPeter Xu 	/*
60656041c691SPeter Xu 	 * We allow overwriting a pte marker: consider when both MISSING|WP are
60666041c691SPeter Xu 	 * registered, we first wr-protect a none pte which has no page cache
60676041c691SPeter Xu 	 * page backing it, then access the page.
60686041c691SPeter Xu 	 */
60696041c691SPeter Xu 	if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
60708fb5debcSMike Kravetz 		goto out_release_unlock;
60718fb5debcSMike Kravetz 
6072ab74ef70SMiaohe Lin 	if (page_in_pagecache) {
6073fb3d824dSDavid Hildenbrand 		page_dup_file_rmap(page, true);
60741c9e8defSMike Kravetz 	} else {
6075d6995da3SMike Kravetz 		ClearHPageRestoreReserve(page);
60768fb5debcSMike Kravetz 		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
60771c9e8defSMike Kravetz 	}
60788fb5debcSMike Kravetz 
60796041c691SPeter Xu 	/*
60806041c691SPeter Xu 	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
60816041c691SPeter Xu 	 * with wp flag set, don't set pte write bit.
60826041c691SPeter Xu 	 */
60836041c691SPeter Xu 	if (wp_copy || (is_continue && !vm_shared))
6084f6191471SAxel Rasmussen 		writable = 0;
6085f6191471SAxel Rasmussen 	else
6086f6191471SAxel Rasmussen 		writable = dst_vma->vm_flags & VM_WRITE;
6087f6191471SAxel Rasmussen 
6088f6191471SAxel Rasmussen 	_dst_pte = make_huge_pte(dst_vma, page, writable);
60896041c691SPeter Xu 	/*
60906041c691SPeter Xu 	 * Always mark UFFDIO_COPY page dirty; note that this may not be
60916041c691SPeter Xu 	 * extremely important for hugetlbfs for now since swapping is not
60926041c691SPeter Xu 	 * supported, but we should still be clear that this page cannot be
60936041c691SPeter Xu 	 * thrown away at will, even if the write bit is not set.
60946041c691SPeter Xu 	 */
60958fb5debcSMike Kravetz 	_dst_pte = huge_pte_mkdirty(_dst_pte);
60968fb5debcSMike Kravetz 	_dst_pte = pte_mkyoung(_dst_pte);
60978fb5debcSMike Kravetz 
60986041c691SPeter Xu 	if (wp_copy)
60996041c691SPeter Xu 		_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
61006041c691SPeter Xu 
61018fb5debcSMike Kravetz 	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
61028fb5debcSMike Kravetz 
61038fb5debcSMike Kravetz 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
61048fb5debcSMike Kravetz 
61058fb5debcSMike Kravetz 	/* No need to invalidate - it was non-present before */
61068fb5debcSMike Kravetz 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
61078fb5debcSMike Kravetz 
61088fb5debcSMike Kravetz 	spin_unlock(ptl);
6109f6191471SAxel Rasmussen 	if (!is_continue)
61108f251a3dSMike Kravetz 		SetHPageMigratable(page);
6111f6191471SAxel Rasmussen 	if (vm_shared || is_continue)
61121c9e8defSMike Kravetz 		unlock_page(page);
61138fb5debcSMike Kravetz 	ret = 0;
61148fb5debcSMike Kravetz out:
61158fb5debcSMike Kravetz 	return ret;
61168fb5debcSMike Kravetz out_release_unlock:
61178fb5debcSMike Kravetz 	spin_unlock(ptl);
6118f6191471SAxel Rasmussen 	if (vm_shared || is_continue)
61191c9e8defSMike Kravetz 		unlock_page(page);
61205af10dfdSAndrea Arcangeli out_release_nounlock:
6121cc30042dSMina Almasry 	if (!page_in_pagecache)
6122846be085SMike Kravetz 		restore_reserve_on_error(h, dst_vma, dst_addr, page);
61238fb5debcSMike Kravetz 	put_page(page);
61248fb5debcSMike Kravetz 	goto out;
61258fb5debcSMike Kravetz }
6126714c1891SAxel Rasmussen #endif /* CONFIG_USERFAULTFD */
61278fb5debcSMike Kravetz 
612882e5d378SJoao Martins static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
612982e5d378SJoao Martins 				 int refs, struct page **pages,
613082e5d378SJoao Martins 				 struct vm_area_struct **vmas)
613182e5d378SJoao Martins {
613282e5d378SJoao Martins 	int nr;
613382e5d378SJoao Martins 
613482e5d378SJoao Martins 	for (nr = 0; nr < refs; nr++) {
613582e5d378SJoao Martins 		if (likely(pages))
6136*14455eabSCheng Li 			pages[nr] = nth_page(page, nr);
613782e5d378SJoao Martins 		if (vmas)
613882e5d378SJoao Martins 			vmas[nr] = vma;
613982e5d378SJoao Martins 	}
614082e5d378SJoao Martins }
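
/*
 * A small worked example of the helper above, assuming 2MB huge pages
 * (512 base pages): with pfn_offset == 3 and refs == 4,
 * follow_hugetlb_page() below effectively does
 *
 *	record_subpages_vmas(nth_page(page, 3), vma, 4, pages + i, vmas + i);
 *
 * which fills pages[i..i+3] with subpages 3..6 of the huge page and sets
 * the matching vmas[] slots to vma.
 */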
614182e5d378SJoao Martins 
6142a7f22660SDavid Hildenbrand static inline bool __follow_hugetlb_must_fault(unsigned int flags, pte_t *pte,
6143a7f22660SDavid Hildenbrand 					       bool *unshare)
6144a7f22660SDavid Hildenbrand {
6145a7f22660SDavid Hildenbrand 	pte_t pteval = huge_ptep_get(pte);
6146a7f22660SDavid Hildenbrand 
6147a7f22660SDavid Hildenbrand 	*unshare = false;
6148a7f22660SDavid Hildenbrand 	if (is_swap_pte(pteval))
6149a7f22660SDavid Hildenbrand 		return true;
6150a7f22660SDavid Hildenbrand 	if (huge_pte_write(pteval))
6151a7f22660SDavid Hildenbrand 		return false;
6152a7f22660SDavid Hildenbrand 	if (flags & FOLL_WRITE)
6153a7f22660SDavid Hildenbrand 		return true;
6154a7f22660SDavid Hildenbrand 	if (gup_must_unshare(flags, pte_page(pteval))) {
6155a7f22660SDavid Hildenbrand 		*unshare = true;
6156a7f22660SDavid Hildenbrand 		return true;
6157a7f22660SDavid Hildenbrand 	}
6158a7f22660SDavid Hildenbrand 	return false;
6159a7f22660SDavid Hildenbrand }
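
/*
 * In plain terms, the helper above makes follow_hugetlb_page() fault when
 * the pte is a swap/migration/hwpoison entry, when FOLL_WRITE is requested
 * but the pte is not writable, or when GUP must first unshare a shared
 * anonymous page (in which case *unshare is set); a present, writable pte
 * never requires a fault here.
 */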
6160a7f22660SDavid Hildenbrand 
616128a35716SMichel Lespinasse long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
616263551ae0SDavid Gibson 			 struct page **pages, struct vm_area_struct **vmas,
616328a35716SMichel Lespinasse 			 unsigned long *position, unsigned long *nr_pages,
61644f6da934SPeter Xu 			 long i, unsigned int flags, int *locked)
616563551ae0SDavid Gibson {
6166d5d4b0aaSChen, Kenneth W 	unsigned long pfn_offset;
6167d5d4b0aaSChen, Kenneth W 	unsigned long vaddr = *position;
616828a35716SMichel Lespinasse 	unsigned long remainder = *nr_pages;
6169a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
61700fa5bc40SJoao Martins 	int err = -EFAULT, refs;
617163551ae0SDavid Gibson 
617263551ae0SDavid Gibson 	while (vaddr < vma->vm_end && remainder) {
617363551ae0SDavid Gibson 		pte_t *pte;
6174cb900f41SKirill A. Shutemov 		spinlock_t *ptl = NULL;
6175a7f22660SDavid Hildenbrand 		bool unshare = false;
61762a15efc9SHugh Dickins 		int absent;
617763551ae0SDavid Gibson 		struct page *page;
617863551ae0SDavid Gibson 
61794c887265SAdam Litke 		/*
618002057967SDavid Rientjes 		 * If we have a pending SIGKILL, don't keep faulting pages and
618102057967SDavid Rientjes 		 * potentially allocating memory.
618202057967SDavid Rientjes 		 */
6183fa45f116SDavidlohr Bueso 		if (fatal_signal_pending(current)) {
618402057967SDavid Rientjes 			remainder = 0;
618502057967SDavid Rientjes 			break;
618602057967SDavid Rientjes 		}
618702057967SDavid Rientjes 
618802057967SDavid Rientjes 		/*
61894c887265SAdam Litke 		 * Some archs (sparc64, sh*) have multiple pte_ts to
61902a15efc9SHugh Dickins 		 * each hugepage.  We have to make sure we get the
61914c887265SAdam Litke 		 * first, for the page indexing below to work.
6192cb900f41SKirill A. Shutemov 		 *
6193cb900f41SKirill A. Shutemov 		 * Note that page table lock is not held when pte is null.
61944c887265SAdam Litke 		 */
61957868a208SPunit Agrawal 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
61967868a208SPunit Agrawal 				      huge_page_size(h));
6197cb900f41SKirill A. Shutemov 		if (pte)
6198cb900f41SKirill A. Shutemov 			ptl = huge_pte_lock(h, mm, pte);
61992a15efc9SHugh Dickins 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
620063551ae0SDavid Gibson 
62012a15efc9SHugh Dickins 		/*
62022a15efc9SHugh Dickins 		 * When coredumping, it suits get_dump_page if we just return
62033ae77f43SHugh Dickins 		 * an error where there's an empty slot with no huge pagecache
62043ae77f43SHugh Dickins 		 * to back it.  This way, we avoid allocating a hugepage, and
62053ae77f43SHugh Dickins 		 * the sparse dumpfile avoids allocating disk blocks, but its
62063ae77f43SHugh Dickins 		 * huge holes still show up with zeroes where they need to be.
62072a15efc9SHugh Dickins 		 */
62083ae77f43SHugh Dickins 		if (absent && (flags & FOLL_DUMP) &&
62093ae77f43SHugh Dickins 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
6210cb900f41SKirill A. Shutemov 			if (pte)
6211cb900f41SKirill A. Shutemov 				spin_unlock(ptl);
62122a15efc9SHugh Dickins 			remainder = 0;
62132a15efc9SHugh Dickins 			break;
62142a15efc9SHugh Dickins 		}
62152a15efc9SHugh Dickins 
62169cc3a5bdSNaoya Horiguchi 		/*
62179cc3a5bdSNaoya Horiguchi 		 * We need to call hugetlb_fault for both hugepages under migration
62189cc3a5bdSNaoya Horiguchi 		 * (in which case hugetlb_fault waits for the migration) and
62199cc3a5bdSNaoya Horiguchi 		 * hwpoisoned hugepages (in which case we need to prevent the
62209cc3a5bdSNaoya Horiguchi 		 * caller from accessing them). In order to do this, we use
62219cc3a5bdSNaoya Horiguchi 		 * is_swap_pte here instead of is_hugetlb_entry_migration and
62229cc3a5bdSNaoya Horiguchi 		 * is_hugetlb_entry_hwpoisoned. This is because it simply covers
62239cc3a5bdSNaoya Horiguchi 		 * both cases, and because we can't follow correct pages
62249cc3a5bdSNaoya Horiguchi 		 * directly from any kind of swap entries.
62259cc3a5bdSNaoya Horiguchi 		 */
6226a7f22660SDavid Hildenbrand 		if (absent ||
6227a7f22660SDavid Hildenbrand 		    __follow_hugetlb_must_fault(flags, pte, &unshare)) {
62282b740303SSouptick Joarder 			vm_fault_t ret;
622987ffc118SAndrea Arcangeli 			unsigned int fault_flags = 0;
62304c887265SAdam Litke 
6231cb900f41SKirill A. Shutemov 			if (pte)
6232cb900f41SKirill A. Shutemov 				spin_unlock(ptl);
623387ffc118SAndrea Arcangeli 			if (flags & FOLL_WRITE)
623487ffc118SAndrea Arcangeli 				fault_flags |= FAULT_FLAG_WRITE;
6235a7f22660SDavid Hildenbrand 			else if (unshare)
6236a7f22660SDavid Hildenbrand 				fault_flags |= FAULT_FLAG_UNSHARE;
62374f6da934SPeter Xu 			if (locked)
623871335f37SPeter Xu 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
623971335f37SPeter Xu 					FAULT_FLAG_KILLABLE;
624087ffc118SAndrea Arcangeli 			if (flags & FOLL_NOWAIT)
624187ffc118SAndrea Arcangeli 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
624287ffc118SAndrea Arcangeli 					FAULT_FLAG_RETRY_NOWAIT;
624387ffc118SAndrea Arcangeli 			if (flags & FOLL_TRIED) {
62444426e945SPeter Xu 				/*
62454426e945SPeter Xu 				 * Note: FAULT_FLAG_ALLOW_RETRY and
62464426e945SPeter Xu 				 * FAULT_FLAG_TRIED can co-exist
62474426e945SPeter Xu 				 */
624887ffc118SAndrea Arcangeli 				fault_flags |= FAULT_FLAG_TRIED;
624987ffc118SAndrea Arcangeli 			}
625087ffc118SAndrea Arcangeli 			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
625187ffc118SAndrea Arcangeli 			if (ret & VM_FAULT_ERROR) {
62522be7cfedSDaniel Jordan 				err = vm_fault_to_errno(ret, flags);
62531c59827dSHugh Dickins 				remainder = 0;
62541c59827dSHugh Dickins 				break;
62551c59827dSHugh Dickins 			}
625687ffc118SAndrea Arcangeli 			if (ret & VM_FAULT_RETRY) {
62574f6da934SPeter Xu 				if (locked &&
62581ac25013SAndrea Arcangeli 				    !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
62594f6da934SPeter Xu 					*locked = 0;
626087ffc118SAndrea Arcangeli 				*nr_pages = 0;
626187ffc118SAndrea Arcangeli 				/*
626287ffc118SAndrea Arcangeli 				 * VM_FAULT_RETRY must not return an
626387ffc118SAndrea Arcangeli 				 * error, it will return zero
626487ffc118SAndrea Arcangeli 				 * instead.
626587ffc118SAndrea Arcangeli 				 *
626687ffc118SAndrea Arcangeli 				 * No need to update "position" as the
626787ffc118SAndrea Arcangeli 				 * caller will not check it after
626887ffc118SAndrea Arcangeli 				 * *nr_pages is set to 0.
626987ffc118SAndrea Arcangeli 				 */
627087ffc118SAndrea Arcangeli 				return i;
627187ffc118SAndrea Arcangeli 			}
627287ffc118SAndrea Arcangeli 			continue;
627387ffc118SAndrea Arcangeli 		}
627463551ae0SDavid Gibson 
6275a5516438SAndi Kleen 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
62767f2e9525SGerald Schaefer 		page = pte_page(huge_ptep_get(pte));
62778fde12caSLinus Torvalds 
6278b6a2619cSDavid Hildenbrand 		VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
6279b6a2619cSDavid Hildenbrand 			       !PageAnonExclusive(page), page);
6280b6a2619cSDavid Hildenbrand 
62818fde12caSLinus Torvalds 		/*
6282acbfb087SZhigang Lu 		 * If subpage information is not requested, update counters
6283acbfb087SZhigang Lu 		 * and skip the per-subpage handling below.
6284acbfb087SZhigang Lu 		 */
6285acbfb087SZhigang Lu 		if (!pages && !vmas && !pfn_offset &&
6286acbfb087SZhigang Lu 		    (vaddr + huge_page_size(h) < vma->vm_end) &&
6287acbfb087SZhigang Lu 		    (remainder >= pages_per_huge_page(h))) {
6288acbfb087SZhigang Lu 			vaddr += huge_page_size(h);
6289acbfb087SZhigang Lu 			remainder -= pages_per_huge_page(h);
6290acbfb087SZhigang Lu 			i += pages_per_huge_page(h);
6291acbfb087SZhigang Lu 			spin_unlock(ptl);
6292acbfb087SZhigang Lu 			continue;
6293acbfb087SZhigang Lu 		}
6294acbfb087SZhigang Lu 
6295d08af0a5SJoao Martins 		/* vaddr may not be aligned to PAGE_SIZE */
6296d08af0a5SJoao Martins 		refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
6297d08af0a5SJoao Martins 		    (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
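		/*
		 * For illustration, with 2MB huge pages (512 base pages): if
		 * vaddr starts at subpage 10 of the huge page, 1000 base pages
		 * are still wanted (remainder) and the vma ends 100 base pages
		 * further on, then refs = min3(512 - 10, 1000, 100) = 100.
		 */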
62980fa5bc40SJoao Martins 
629982e5d378SJoao Martins 		if (pages || vmas)
6300*14455eabSCheng Li 			record_subpages_vmas(nth_page(page, pfn_offset),
630182e5d378SJoao Martins 					     vma, refs,
630282e5d378SJoao Martins 					     likely(pages) ? pages + i : NULL,
630382e5d378SJoao Martins 					     vmas ? vmas + i : NULL);
630463551ae0SDavid Gibson 
630582e5d378SJoao Martins 		if (pages) {
63060fa5bc40SJoao Martins 			/*
6307822951d8SMatthew Wilcox (Oracle) 			 * try_grab_folio() should always succeed here,
63080fa5bc40SJoao Martins 			 * because: a) we hold the ptl lock, and b) we've just
63090fa5bc40SJoao Martins 			 * checked that the huge page is present in the page
63100fa5bc40SJoao Martins 			 * tables. If the huge page is present, then the tail
63110fa5bc40SJoao Martins 			 * pages must also be present. The ptl prevents the
63120fa5bc40SJoao Martins 			 * head page and tail pages from being rearranged in
63130fa5bc40SJoao Martins 			 * any way. So this page must be available at this
63140fa5bc40SJoao Martins 			 * point, unless the page refcount overflowed:
63150fa5bc40SJoao Martins 			 */
6316822951d8SMatthew Wilcox (Oracle) 			if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs,
63170fa5bc40SJoao Martins 							 flags))) {
63180fa5bc40SJoao Martins 				spin_unlock(ptl);
63190fa5bc40SJoao Martins 				remainder = 0;
63200fa5bc40SJoao Martins 				err = -ENOMEM;
63210fa5bc40SJoao Martins 				break;
63220fa5bc40SJoao Martins 			}
6323d5d4b0aaSChen, Kenneth W 		}
632482e5d378SJoao Martins 
632582e5d378SJoao Martins 		vaddr += (refs << PAGE_SHIFT);
632682e5d378SJoao Martins 		remainder -= refs;
632782e5d378SJoao Martins 		i += refs;
632882e5d378SJoao Martins 
6329cb900f41SKirill A. Shutemov 		spin_unlock(ptl);
633063551ae0SDavid Gibson 	}
633128a35716SMichel Lespinasse 	*nr_pages = remainder;
633287ffc118SAndrea Arcangeli 	/*
633387ffc118SAndrea Arcangeli 	 * Setting position is actually required only if remainder is
633487ffc118SAndrea Arcangeli 	 * not zero, but it's faster not to add an "if (remainder)"
633587ffc118SAndrea Arcangeli 	 * branch.
633687ffc118SAndrea Arcangeli 	 */
633763551ae0SDavid Gibson 	*position = vaddr;
633863551ae0SDavid Gibson 
63392be7cfedSDaniel Jordan 	return i ? i : err;
634063551ae0SDavid Gibson }
63418f860591SZhang, Yanmin 
63427da4d641SPeter Zijlstra unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
63435a90d5a1SPeter Xu 		unsigned long address, unsigned long end,
63445a90d5a1SPeter Xu 		pgprot_t newprot, unsigned long cp_flags)
63458f860591SZhang, Yanmin {
63468f860591SZhang, Yanmin 	struct mm_struct *mm = vma->vm_mm;
63478f860591SZhang, Yanmin 	unsigned long start = address;
63488f860591SZhang, Yanmin 	pte_t *ptep;
63498f860591SZhang, Yanmin 	pte_t pte;
6350a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
635160dfaad6SPeter Xu 	unsigned long pages = 0, psize = huge_page_size(h);
6352dff11abeSMike Kravetz 	bool shared_pmd = false;
6353ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
6354e95a9851SMike Kravetz 	unsigned long last_addr_mask;
63555a90d5a1SPeter Xu 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
63565a90d5a1SPeter Xu 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6357dff11abeSMike Kravetz 
6358dff11abeSMike Kravetz 	/*
6359dff11abeSMike Kravetz 	 * In the case of shared PMDs, the area to flush could be beyond
6360ac46d4f3SJérôme Glisse 	 * start/end.  Set range.start/range.end to cover the maximum possible
6361dff11abeSMike Kravetz 	 * range if PMD sharing is possible.
6362dff11abeSMike Kravetz 	 */
63637269f999SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
63647269f999SJérôme Glisse 				0, vma, mm, start, end);
6365ac46d4f3SJérôme Glisse 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
63668f860591SZhang, Yanmin 
63678f860591SZhang, Yanmin 	BUG_ON(address >= end);
6368ac46d4f3SJérôme Glisse 	flush_cache_range(vma, range.start, range.end);
63698f860591SZhang, Yanmin 
6370ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
6371e95a9851SMike Kravetz 	last_addr_mask = hugetlb_mask_last_page(h);
637283cde9e8SDavidlohr Bueso 	i_mmap_lock_write(vma->vm_file->f_mapping);
637360dfaad6SPeter Xu 	for (; address < end; address += psize) {
6374cb900f41SKirill A. Shutemov 		spinlock_t *ptl;
637560dfaad6SPeter Xu 		ptep = huge_pte_offset(mm, address, psize);
6376e95a9851SMike Kravetz 		if (!ptep) {
6377e95a9851SMike Kravetz 			address |= last_addr_mask;
63788f860591SZhang, Yanmin 			continue;
6379e95a9851SMike Kravetz 		}
6380cb900f41SKirill A. Shutemov 		ptl = huge_pte_lock(h, mm, ptep);
63814ddb4d91SMike Kravetz 		if (huge_pmd_unshare(mm, vma, address, ptep)) {
638260dfaad6SPeter Xu 			/*
638360dfaad6SPeter Xu 			 * When uffd-wp is enabled on the vma, unsharing
638460dfaad6SPeter Xu 			 * shouldn't happen at all.  Warn about it if it
638560dfaad6SPeter Xu 			 * happens for some reason.
638660dfaad6SPeter Xu 			 */
638760dfaad6SPeter Xu 			WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
63887da4d641SPeter Zijlstra 			pages++;
6389cb900f41SKirill A. Shutemov 			spin_unlock(ptl);
6390dff11abeSMike Kravetz 			shared_pmd = true;
63914ddb4d91SMike Kravetz 			address |= last_addr_mask;
639239dde65cSChen, Kenneth W 			continue;
63937da4d641SPeter Zijlstra 		}
6394a8bda28dSNaoya Horiguchi 		pte = huge_ptep_get(ptep);
6395a8bda28dSNaoya Horiguchi 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
6396a8bda28dSNaoya Horiguchi 			spin_unlock(ptl);
6397a8bda28dSNaoya Horiguchi 			continue;
6398a8bda28dSNaoya Horiguchi 		}
6399a8bda28dSNaoya Horiguchi 		if (unlikely(is_hugetlb_entry_migration(pte))) {
6400a8bda28dSNaoya Horiguchi 			swp_entry_t entry = pte_to_swp_entry(pte);
64016c287605SDavid Hildenbrand 			struct page *page = pfn_swap_entry_to_page(entry);
6402a8bda28dSNaoya Horiguchi 
64036c287605SDavid Hildenbrand 			if (!is_readable_migration_entry(entry)) {
6404a8bda28dSNaoya Horiguchi 				pte_t newpte;
6405a8bda28dSNaoya Horiguchi 
64066c287605SDavid Hildenbrand 				if (PageAnon(page))
64076c287605SDavid Hildenbrand 					entry = make_readable_exclusive_migration_entry(
64086c287605SDavid Hildenbrand 								swp_offset(entry));
64096c287605SDavid Hildenbrand 				else
64104dd845b5SAlistair Popple 					entry = make_readable_migration_entry(
64114dd845b5SAlistair Popple 								swp_offset(entry));
6412a8bda28dSNaoya Horiguchi 				newpte = swp_entry_to_pte(entry);
64135a90d5a1SPeter Xu 				if (uffd_wp)
64145a90d5a1SPeter Xu 					newpte = pte_swp_mkuffd_wp(newpte);
64155a90d5a1SPeter Xu 				else if (uffd_wp_resolve)
64165a90d5a1SPeter Xu 					newpte = pte_swp_clear_uffd_wp(newpte);
641718f39629SQi Zheng 				set_huge_pte_at(mm, address, ptep, newpte);
6418a8bda28dSNaoya Horiguchi 				pages++;
6419a8bda28dSNaoya Horiguchi 			}
6420a8bda28dSNaoya Horiguchi 			spin_unlock(ptl);
6421a8bda28dSNaoya Horiguchi 			continue;
6422a8bda28dSNaoya Horiguchi 		}
642360dfaad6SPeter Xu 		if (unlikely(pte_marker_uffd_wp(pte))) {
642460dfaad6SPeter Xu 			/*
642560dfaad6SPeter Xu 			 * This is changing a non-present pte into a none pte,
642660dfaad6SPeter Xu 			 * no need for huge_ptep_modify_prot_start/commit().
642760dfaad6SPeter Xu 			 */
642860dfaad6SPeter Xu 			if (uffd_wp_resolve)
642960dfaad6SPeter Xu 				huge_pte_clear(mm, address, ptep, psize);
643060dfaad6SPeter Xu 		}
6431a8bda28dSNaoya Horiguchi 		if (!huge_pte_none(pte)) {
6432023bdd00SAneesh Kumar K.V 			pte_t old_pte;
643379c1c594SChristophe Leroy 			unsigned int shift = huge_page_shift(hstate_vma(vma));
6434023bdd00SAneesh Kumar K.V 
6435023bdd00SAneesh Kumar K.V 			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
643616785bd7SAnshuman Khandual 			pte = huge_pte_modify(old_pte, newprot);
643779c1c594SChristophe Leroy 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
64385a90d5a1SPeter Xu 			if (uffd_wp)
64395a90d5a1SPeter Xu 				pte = huge_pte_mkuffd_wp(huge_pte_wrprotect(pte));
64405a90d5a1SPeter Xu 			else if (uffd_wp_resolve)
64415a90d5a1SPeter Xu 				pte = huge_pte_clear_uffd_wp(pte);
6442023bdd00SAneesh Kumar K.V 			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
64437da4d641SPeter Zijlstra 			pages++;
644460dfaad6SPeter Xu 		} else {
644560dfaad6SPeter Xu 			/* None pte */
644660dfaad6SPeter Xu 			if (unlikely(uffd_wp))
644760dfaad6SPeter Xu 				/* Safe to modify directly (none->non-present). */
644860dfaad6SPeter Xu 				set_huge_pte_at(mm, address, ptep,
644960dfaad6SPeter Xu 						make_pte_marker(PTE_MARKER_UFFD_WP));
64508f860591SZhang, Yanmin 		}
6451cb900f41SKirill A. Shutemov 		spin_unlock(ptl);
64528f860591SZhang, Yanmin 	}
6453d833352aSMel Gorman 	/*
6454c8c06efaSDavidlohr Bueso 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
6455d833352aSMel Gorman 	 * may have cleared our pud entry and done put_page on the page table:
6456c8c06efaSDavidlohr Bueso 	 * once we release i_mmap_rwsem, another task can do the final put_page
6457dff11abeSMike Kravetz 	 * and that page table can then be reused and filled with junk.  If we
6458dff11abeSMike Kravetz 	 * actually did unshare a page of pmds, flush the range corresponding to the pud.
6459d833352aSMel Gorman 	 */
6460dff11abeSMike Kravetz 	if (shared_pmd)
6461ac46d4f3SJérôme Glisse 		flush_hugetlb_tlb_range(vma, range.start, range.end);
6462dff11abeSMike Kravetz 	else
64635491ae7bSAneesh Kumar K.V 		flush_hugetlb_tlb_range(vma, start, end);
64640f10851eSJérôme Glisse 	/*
64650f10851eSJérôme Glisse 	 * No need to call mmu_notifier_invalidate_range(): we are downgrading
64660f10851eSJérôme Glisse 	 * page table protection, not changing it to point to a new page.
64670f10851eSJérôme Glisse 	 *
6468ee65728eSMike Rapoport 	 * See Documentation/mm/mmu_notifier.rst
64690f10851eSJérôme Glisse 	 */
647083cde9e8SDavidlohr Bueso 	i_mmap_unlock_write(vma->vm_file->f_mapping);
6471ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
64727da4d641SPeter Zijlstra 
64737da4d641SPeter Zijlstra 	return pages << h->order;
64748f860591SZhang, Yanmin }
64758f860591SZhang, Yanmin 
647633b8f84aSMike Kravetz /* Return true if reservation was successful, false otherwise.  */
647733b8f84aSMike Kravetz bool hugetlb_reserve_pages(struct inode *inode,
6478a1e78772SMel Gorman 					long from, long to,
64795a6fe125SMel Gorman 					struct vm_area_struct *vma,
6480ca16d140SKOSAKI Motohiro 					vm_flags_t vm_flags)
6481e4e574b7SAdam Litke {
648233b8f84aSMike Kravetz 	long chg, add = -1;
6483a5516438SAndi Kleen 	struct hstate *h = hstate_inode(inode);
648490481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_inode(inode);
64859119a41eSJoonsoo Kim 	struct resv_map *resv_map;
6486075a61d0SMina Almasry 	struct hugetlb_cgroup *h_cg = NULL;
64870db9d74eSMina Almasry 	long gbl_reserve, regions_needed = 0;
6488e4e574b7SAdam Litke 
648963489f8eSMike Kravetz 	/* This should never happen */
649063489f8eSMike Kravetz 	if (from > to) {
649163489f8eSMike Kravetz 		VM_WARN(1, "%s called with a negative range\n", __func__);
649233b8f84aSMike Kravetz 		return false;
649363489f8eSMike Kravetz 	}
649463489f8eSMike Kravetz 
6495a1e78772SMel Gorman 	/*
649617c9d12eSMel Gorman 	 * Only apply the hugepage reservation if asked. At fault time, an
649717c9d12eSMel Gorman 	 * attempt will be made for VM_NORESERVE mappings to allocate a page
649890481622SDavid Gibson 	 * without using reserves.
649917c9d12eSMel Gorman 	 */
6500ca16d140SKOSAKI Motohiro 	if (vm_flags & VM_NORESERVE)
650133b8f84aSMike Kravetz 		return true;
650217c9d12eSMel Gorman 
650317c9d12eSMel Gorman 	/*
6504a1e78772SMel Gorman 	 * Shared mappings base their reservation on the number of pages that
6505a1e78772SMel Gorman 	 * are already allocated on behalf of the file. Private mappings need
6506a1e78772SMel Gorman 	 * to reserve the full area even if read-only as mprotect() may be
6507a1e78772SMel Gorman 	 * called to make the mapping read-write. Assume !vma is a shm mapping
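	 * (e.g. a segment set up via hugetlb_file_setup(), as shmget() with
	 * SHM_HUGETLB does).  For shared mappings, region_chg() below counts
	 * only the pages in [from, to) that are not already present in the
	 * reserve map; for private mappings chg is simply to - from.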
6508a1e78772SMel Gorman 	 */
65099119a41eSJoonsoo Kim 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
6510f27a5136SMike Kravetz 		/*
6511f27a5136SMike Kravetz 		 * resv_map can not be NULL as hugetlb_reserve_pages is only
6512f27a5136SMike Kravetz 		 * called for inodes for which resv_maps were created (see
6513f27a5136SMike Kravetz 		 * hugetlbfs_get_inode).
6514f27a5136SMike Kravetz 		 */
65154e35f483SJoonsoo Kim 		resv_map = inode_resv_map(inode);
65169119a41eSJoonsoo Kim 
65170db9d74eSMina Almasry 		chg = region_chg(resv_map, from, to, &regions_needed);
65189119a41eSJoonsoo Kim 
65199119a41eSJoonsoo Kim 	} else {
6520e9fe92aeSMina Almasry 		/* Private mapping. */
65219119a41eSJoonsoo Kim 		resv_map = resv_map_alloc();
65225a6fe125SMel Gorman 		if (!resv_map)
652333b8f84aSMike Kravetz 			return false;
65245a6fe125SMel Gorman 
652517c9d12eSMel Gorman 		chg = to - from;
652617c9d12eSMel Gorman 
65275a6fe125SMel Gorman 		set_vma_resv_map(vma, resv_map);
65285a6fe125SMel Gorman 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
65295a6fe125SMel Gorman 	}
65305a6fe125SMel Gorman 
653133b8f84aSMike Kravetz 	if (chg < 0)
6532c50ac050SDave Hansen 		goto out_err;
653317c9d12eSMel Gorman 
653433b8f84aSMike Kravetz 	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
653533b8f84aSMike Kravetz 				chg * pages_per_huge_page(h), &h_cg) < 0)
6536075a61d0SMina Almasry 		goto out_err;
6537075a61d0SMina Almasry 
6538075a61d0SMina Almasry 	if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
6539075a61d0SMina Almasry 		/* For private mappings, the hugetlb_cgroup uncharge info hangs
6540075a61d0SMina Almasry 		 * off the resv_map.
6541075a61d0SMina Almasry 		 */
6542075a61d0SMina Almasry 		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6543075a61d0SMina Almasry 	}
6544075a61d0SMina Almasry 
65451c5ecae3SMike Kravetz 	/*
65461c5ecae3SMike Kravetz 	 * There must be enough pages in the subpool for the mapping. If
65471c5ecae3SMike Kravetz 	 * the subpool has a minimum size, there may be some global
65481c5ecae3SMike Kravetz 	 * reservations already in place (gbl_reserve).
65491c5ecae3SMike Kravetz 	 */
65501c5ecae3SMike Kravetz 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
655133b8f84aSMike Kravetz 	if (gbl_reserve < 0)
6552075a61d0SMina Almasry 		goto out_uncharge_cgroup;
655317c9d12eSMel Gorman 
655417c9d12eSMel Gorman 	/*
655517c9d12eSMel Gorman 	 * Check that enough hugepages are available for the reservation.
655690481622SDavid Gibson 	 * Hand the pages back to the subpool if there are not.
655717c9d12eSMel Gorman 	 */
655833b8f84aSMike Kravetz 	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
6559075a61d0SMina Almasry 		goto out_put_pages;
656017c9d12eSMel Gorman 
656117c9d12eSMel Gorman 	/*
656217c9d12eSMel Gorman 	 * Account for the reservations made. Shared mappings record regions
656317c9d12eSMel Gorman 	 * that have reservations as they are shared by multiple VMAs.
656417c9d12eSMel Gorman 	 * When the last VMA disappears, the region map says how much
656517c9d12eSMel Gorman 	 * the reservation was and the page cache tells how much of
656617c9d12eSMel Gorman 	 * the reservation was consumed. Private mappings are per-VMA and
656717c9d12eSMel Gorman 	 * only the consumed reservations are tracked. When the VMA
656817c9d12eSMel Gorman 	 * disappears, the original reservation is the VMA size and the
656917c9d12eSMel Gorman 	 * consumed reservations are stored in the map. Hence, nothing
657017c9d12eSMel Gorman 	 * else has to be done for private mappings here
657117c9d12eSMel Gorman 	 */
657233039678SMike Kravetz 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
6573075a61d0SMina Almasry 		add = region_add(resv_map, from, to, regions_needed, h, h_cg);
657433039678SMike Kravetz 
65750db9d74eSMina Almasry 		if (unlikely(add < 0)) {
65760db9d74eSMina Almasry 			hugetlb_acct_memory(h, -gbl_reserve);
6577075a61d0SMina Almasry 			goto out_put_pages;
65780db9d74eSMina Almasry 		} else if (unlikely(chg > add)) {
657933039678SMike Kravetz 			/*
658033039678SMike Kravetz 			 * pages in this range were added to the reserve
658133039678SMike Kravetz 			 * map between region_chg and region_add.  This
658233039678SMike Kravetz 			 * indicates a race with alloc_huge_page.  Adjust
658333039678SMike Kravetz 			 * the subpool and reserve counts modified above
658433039678SMike Kravetz 			 * based on the difference.
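			 * For example, if region_chg() saw 4 pages missing but
			 * region_add() found only 3 still unrecorded, the
			 * extra page (chg - add = 1) is handed back to the
			 * subpool and the global reservation count below.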
658533039678SMike Kravetz 			 */
658633039678SMike Kravetz 			long rsv_adjust;
658733039678SMike Kravetz 
6588d85aecf2SMiaohe Lin 			/*
6589d85aecf2SMiaohe Lin 			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
6590d85aecf2SMiaohe Lin 			 * reference to h_cg->css. See comment below for detail.
6591d85aecf2SMiaohe Lin 			 */
6592075a61d0SMina Almasry 			hugetlb_cgroup_uncharge_cgroup_rsvd(
6593075a61d0SMina Almasry 				hstate_index(h),
6594075a61d0SMina Almasry 				(chg - add) * pages_per_huge_page(h), h_cg);
6595075a61d0SMina Almasry 
659633039678SMike Kravetz 			rsv_adjust = hugepage_subpool_put_pages(spool,
659733039678SMike Kravetz 								chg - add);
659833039678SMike Kravetz 			hugetlb_acct_memory(h, -rsv_adjust);
6599d85aecf2SMiaohe Lin 		} else if (h_cg) {
6600d85aecf2SMiaohe Lin 			/*
6601d85aecf2SMiaohe Lin 			 * The file_regions will hold their own reference to
6602d85aecf2SMiaohe Lin 			 * h_cg->css. So we should release the reference held
6603d85aecf2SMiaohe Lin 			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
6604d85aecf2SMiaohe Lin 			 * done.
6605d85aecf2SMiaohe Lin 			 */
6606d85aecf2SMiaohe Lin 			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
660733039678SMike Kravetz 		}
660833039678SMike Kravetz 	}
660933b8f84aSMike Kravetz 	return true;
661033b8f84aSMike Kravetz 
6611075a61d0SMina Almasry out_put_pages:
6612075a61d0SMina Almasry 	/* put back original number of pages, chg */
6613075a61d0SMina Almasry 	(void)hugepage_subpool_put_pages(spool, chg);
6614075a61d0SMina Almasry out_uncharge_cgroup:
6615075a61d0SMina Almasry 	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
6616075a61d0SMina Almasry 					    chg * pages_per_huge_page(h), h_cg);
6617c50ac050SDave Hansen out_err:
66185e911373SMike Kravetz 	if (!vma || vma->vm_flags & VM_MAYSHARE)
66190db9d74eSMina Almasry 		/* Only call region_abort if the region_chg succeeded but the
66200db9d74eSMina Almasry 		 * region_add failed or didn't run.
66210db9d74eSMina Almasry 		 */
66220db9d74eSMina Almasry 		if (chg >= 0 && add < 0)
66230db9d74eSMina Almasry 			region_abort(resv_map, from, to, regions_needed);
6624f031dd27SJoonsoo Kim 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
6625f031dd27SJoonsoo Kim 		kref_put(&resv_map->refs, resv_map_release);
662633b8f84aSMike Kravetz 	return false;
6627a43a8c39SChen, Kenneth W }
6628a43a8c39SChen, Kenneth W 
6629b5cec28dSMike Kravetz long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
6630b5cec28dSMike Kravetz 								long freed)
6631a43a8c39SChen, Kenneth W {
6632a5516438SAndi Kleen 	struct hstate *h = hstate_inode(inode);
66334e35f483SJoonsoo Kim 	struct resv_map *resv_map = inode_resv_map(inode);
66349119a41eSJoonsoo Kim 	long chg = 0;
663590481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_inode(inode);
66361c5ecae3SMike Kravetz 	long gbl_reserve;
663745c682a6SKen Chen 
6638f27a5136SMike Kravetz 	/*
6639f27a5136SMike Kravetz 	 * Since this routine can be called in the evict inode path for all
6640f27a5136SMike Kravetz 	 * hugetlbfs inodes, resv_map could be NULL.
6641f27a5136SMike Kravetz 	 */
6642b5cec28dSMike Kravetz 	if (resv_map) {
6643b5cec28dSMike Kravetz 		chg = region_del(resv_map, start, end);
6644b5cec28dSMike Kravetz 		/*
6645b5cec28dSMike Kravetz 		 * region_del() can fail in the rare case where a region
6646b5cec28dSMike Kravetz 		 * must be split and another region descriptor can not be
6647b5cec28dSMike Kravetz 		 * allocated.  If end == LONG_MAX, it will not fail.
6648b5cec28dSMike Kravetz 		 */
6649b5cec28dSMike Kravetz 		if (chg < 0)
6650b5cec28dSMike Kravetz 			return chg;
6651b5cec28dSMike Kravetz 	}
6652b5cec28dSMike Kravetz 
665345c682a6SKen Chen 	spin_lock(&inode->i_lock);
6654e4c6f8beSEric Sandeen 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
665545c682a6SKen Chen 	spin_unlock(&inode->i_lock);
665645c682a6SKen Chen 
66571c5ecae3SMike Kravetz 	/*
66581c5ecae3SMike Kravetz 	 * If the subpool has a minimum size, the number of global
66591c5ecae3SMike Kravetz 	 * reservations to be released may be adjusted.
6660dddf31a4SMiaohe Lin 	 *
6661dddf31a4SMiaohe Lin 	 * Note that !resv_map implies freed == 0. So (chg - freed)
6662dddf31a4SMiaohe Lin 	 * won't go negative.
66631c5ecae3SMike Kravetz 	 */
66641c5ecae3SMike Kravetz 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
66651c5ecae3SMike Kravetz 	hugetlb_acct_memory(h, -gbl_reserve);
6666b5cec28dSMike Kravetz 
6667b5cec28dSMike Kravetz 	return 0;
6668a43a8c39SChen, Kenneth W }
666993f70f90SNaoya Horiguchi 
66703212b535SSteve Capper #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
66713212b535SSteve Capper static unsigned long page_table_shareable(struct vm_area_struct *svma,
66723212b535SSteve Capper 				struct vm_area_struct *vma,
66733212b535SSteve Capper 				unsigned long addr, pgoff_t idx)
66743212b535SSteve Capper {
66753212b535SSteve Capper 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
66763212b535SSteve Capper 				svma->vm_start;
66773212b535SSteve Capper 	unsigned long sbase = saddr & PUD_MASK;
66783212b535SSteve Capper 	unsigned long s_end = sbase + PUD_SIZE;
66793212b535SSteve Capper 
66803212b535SSteve Capper 	/* Allow segments to share if only one is marked locked */
6681de60f5f1SEric B Munson 	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
6682de60f5f1SEric B Munson 	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
66833212b535SSteve Capper 
66843212b535SSteve Capper 	/*
66853212b535SSteve Capper 	 * match the virtual addresses, permission and the alignment of the
66863212b535SSteve Capper 	 * page table page.
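	 * (saddr is the address at which svma maps the same file page that
	 * vma maps at addr; sharing also requires that both addresses use
	 * the same pmd slot within their PUD-sized region, checked below.)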
66873212b535SSteve Capper 	 */
66883212b535SSteve Capper 	if (pmd_index(addr) != pmd_index(saddr) ||
66893212b535SSteve Capper 	    vm_flags != svm_flags ||
669007e51edfSMiaohe Lin 	    !range_in_vma(svma, sbase, s_end))
66913212b535SSteve Capper 		return 0;
66923212b535SSteve Capper 
66933212b535SSteve Capper 	return saddr;
66943212b535SSteve Capper }
66953212b535SSteve Capper 
669631aafb45SNicholas Krause static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
66973212b535SSteve Capper {
66983212b535SSteve Capper 	unsigned long base = addr & PUD_MASK;
66993212b535SSteve Capper 	unsigned long end = base + PUD_SIZE;
67003212b535SSteve Capper 
67013212b535SSteve Capper 	/*
67023212b535SSteve Capper 	 * check on proper vm_flags and page table alignment
67033212b535SSteve Capper 	 */
6704017b1660SMike Kravetz 	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
670531aafb45SNicholas Krause 		return true;
670631aafb45SNicholas Krause 	return false;
67073212b535SSteve Capper }
67083212b535SSteve Capper 
6709c1991e07SPeter Xu bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6710c1991e07SPeter Xu {
6711c1991e07SPeter Xu #ifdef CONFIG_USERFAULTFD
6712c1991e07SPeter Xu 	if (uffd_disable_huge_pmd_share(vma))
6713c1991e07SPeter Xu 		return false;
6714c1991e07SPeter Xu #endif
6715c1991e07SPeter Xu 	return vma_shareable(vma, addr);
6716c1991e07SPeter Xu }
6717c1991e07SPeter Xu 
67183212b535SSteve Capper /*
6719017b1660SMike Kravetz  * Determine if start,end range within vma could be mapped by shared pmd.
6720017b1660SMike Kravetz  * If yes, adjust start and end to cover range associated with possible
6721017b1660SMike Kravetz  * shared pmd mappings.
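 *
 * For example, with 1GB PUD_SIZE (as on x86-64), a range of
 * [0x40100000, 0x40300000) inside a sufficiently large VM_MAYSHARE vma
 * would be widened to [0x40000000, 0x80000000), the PUD-aligned span
 * whose page tables might be shared.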
6722017b1660SMike Kravetz  */
6723017b1660SMike Kravetz void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6724017b1660SMike Kravetz 				unsigned long *start, unsigned long *end)
6725017b1660SMike Kravetz {
6726a1ba9da8SLi Xinhai 	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
6727a1ba9da8SLi Xinhai 		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6728017b1660SMike Kravetz 
6729a1ba9da8SLi Xinhai 	/*
6730f0953a1bSIngo Molnar 	 * vma needs to span at least one aligned PUD size, and the range
6731f0953a1bSIngo Molnar 	 * must be at least partially within it.
6732a1ba9da8SLi Xinhai 	 */
6733a1ba9da8SLi Xinhai 	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
6734a1ba9da8SLi Xinhai 		(*end <= v_start) || (*start >= v_end))
6735017b1660SMike Kravetz 		return;
6736017b1660SMike Kravetz 
673775802ca6SPeter Xu 	/* Extend the range to be PUD aligned for a worst case scenario */
6738a1ba9da8SLi Xinhai 	if (*start > v_start)
6739a1ba9da8SLi Xinhai 		*start = ALIGN_DOWN(*start, PUD_SIZE);
6740017b1660SMike Kravetz 
6741a1ba9da8SLi Xinhai 	if (*end < v_end)
6742a1ba9da8SLi Xinhai 		*end = ALIGN(*end, PUD_SIZE);
6743017b1660SMike Kravetz }
6744017b1660SMike Kravetz 
6745017b1660SMike Kravetz /*
67463212b535SSteve Capper  * Search for a shareable pmd page for hugetlb. In any case it calls pmd_alloc()
67473212b535SSteve Capper  * and returns the corresponding pte. While this is not necessary for the
67483212b535SSteve Capper  * !shared pmd case because we can allocate the pmd later as well, it makes the
6749c0d0381aSMike Kravetz  * code much cleaner.
6750c0d0381aSMike Kravetz  *
67510bf7b64eSMike Kravetz  * This routine must be called with i_mmap_rwsem held in at least read mode if
67520bf7b64eSMike Kravetz  * sharing is possible.  For hugetlbfs, this prevents removal of any page
67530bf7b64eSMike Kravetz  * table entries associated with the address space.  This is important as we
67540bf7b64eSMike Kravetz  * are setting up sharing based on existing page table entries (mappings).
67553212b535SSteve Capper  */
6756aec44e0fSPeter Xu pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6757aec44e0fSPeter Xu 		      unsigned long addr, pud_t *pud)
67583212b535SSteve Capper {
67593212b535SSteve Capper 	struct address_space *mapping = vma->vm_file->f_mapping;
67603212b535SSteve Capper 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
67613212b535SSteve Capper 			vma->vm_pgoff;
67623212b535SSteve Capper 	struct vm_area_struct *svma;
67633212b535SSteve Capper 	unsigned long saddr;
67643212b535SSteve Capper 	pte_t *spte = NULL;
67653212b535SSteve Capper 	pte_t *pte;
6766cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
67673212b535SSteve Capper 
67680bf7b64eSMike Kravetz 	i_mmap_assert_locked(mapping);
67693212b535SSteve Capper 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
67703212b535SSteve Capper 		if (svma == vma)
67713212b535SSteve Capper 			continue;
67723212b535SSteve Capper 
67733212b535SSteve Capper 		saddr = page_table_shareable(svma, vma, addr, idx);
67743212b535SSteve Capper 		if (saddr) {
67757868a208SPunit Agrawal 			spte = huge_pte_offset(svma->vm_mm, saddr,
67767868a208SPunit Agrawal 					       vma_mmu_pagesize(svma));
67773212b535SSteve Capper 			if (spte) {
67783212b535SSteve Capper 				get_page(virt_to_page(spte));
67793212b535SSteve Capper 				break;
67803212b535SSteve Capper 			}
67813212b535SSteve Capper 		}
67823212b535SSteve Capper 	}
67833212b535SSteve Capper 
67843212b535SSteve Capper 	if (!spte)
67853212b535SSteve Capper 		goto out;
67863212b535SSteve Capper 
67878bea8052SAneesh Kumar K.V 	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
6788dc6c9a35SKirill A. Shutemov 	if (pud_none(*pud)) {
67893212b535SSteve Capper 		pud_populate(mm, pud,
67903212b535SSteve Capper 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
6791c17b1f42SKirill A. Shutemov 		mm_inc_nr_pmds(mm);
6792dc6c9a35SKirill A. Shutemov 	} else {
67933212b535SSteve Capper 		put_page(virt_to_page(spte));
6794dc6c9a35SKirill A. Shutemov 	}
6795cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
67963212b535SSteve Capper out:
67973212b535SSteve Capper 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
67983212b535SSteve Capper 	return pte;
67993212b535SSteve Capper }
68003212b535SSteve Capper 
68013212b535SSteve Capper /*
68023212b535SSteve Capper  * unmap huge page backed by shared pte.
68033212b535SSteve Capper  *
68043212b535SSteve Capper  * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
68053212b535SSteve Capper  * shared (indicated by page_count > 1), unmapping is achieved by clearing the
68063212b535SSteve Capper  * pud and decrementing the ref count. If count == 1, the pte page is not shared.
68073212b535SSteve Capper  *
6808c0d0381aSMike Kravetz  * Called with page table lock held and i_mmap_rwsem held in write mode.
68093212b535SSteve Capper  *
68103212b535SSteve Capper  * returns: 1 successfully unmapped a shared pte page
68113212b535SSteve Capper  *	    0 the underlying pte page is not shared, or it is the last user
68123212b535SSteve Capper  */
681334ae204fSMike Kravetz int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
68144ddb4d91SMike Kravetz 					unsigned long addr, pte_t *ptep)
68153212b535SSteve Capper {
68164ddb4d91SMike Kravetz 	pgd_t *pgd = pgd_offset(mm, addr);
68174ddb4d91SMike Kravetz 	p4d_t *p4d = p4d_offset(pgd, addr);
68184ddb4d91SMike Kravetz 	pud_t *pud = pud_offset(p4d, addr);
68193212b535SSteve Capper 
682034ae204fSMike Kravetz 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
68213212b535SSteve Capper 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
68223212b535SSteve Capper 	if (page_count(virt_to_page(ptep)) == 1)
68233212b535SSteve Capper 		return 0;
68243212b535SSteve Capper 
68253212b535SSteve Capper 	pud_clear(pud);
68263212b535SSteve Capper 	put_page(virt_to_page(ptep));
6827dc6c9a35SKirill A. Shutemov 	mm_dec_nr_pmds(mm);
68283212b535SSteve Capper 	return 1;
68293212b535SSteve Capper }
6830c1991e07SPeter Xu 
68319e5fc74cSSteve Capper #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
6832aec44e0fSPeter Xu pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6833aec44e0fSPeter Xu 		      unsigned long addr, pud_t *pud)
68349e5fc74cSSteve Capper {
68359e5fc74cSSteve Capper 	return NULL;
68369e5fc74cSSteve Capper }
6837e81f2d22SZhang Zhen 
683834ae204fSMike Kravetz int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
68394ddb4d91SMike Kravetz 				unsigned long addr, pte_t *ptep)
6840e81f2d22SZhang Zhen {
6841e81f2d22SZhang Zhen 	return 0;
6842e81f2d22SZhang Zhen }
6843017b1660SMike Kravetz 
6844017b1660SMike Kravetz void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6845017b1660SMike Kravetz 				unsigned long *start, unsigned long *end)
6846017b1660SMike Kravetz {
6847017b1660SMike Kravetz }
6848c1991e07SPeter Xu 
6849c1991e07SPeter Xu bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6850c1991e07SPeter Xu {
6851c1991e07SPeter Xu 	return false;
6852c1991e07SPeter Xu }
68533212b535SSteve Capper #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
68543212b535SSteve Capper 
68559e5fc74cSSteve Capper #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
6856aec44e0fSPeter Xu pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
68579e5fc74cSSteve Capper 			unsigned long addr, unsigned long sz)
68589e5fc74cSSteve Capper {
68599e5fc74cSSteve Capper 	pgd_t *pgd;
6860c2febafcSKirill A. Shutemov 	p4d_t *p4d;
68619e5fc74cSSteve Capper 	pud_t *pud;
68629e5fc74cSSteve Capper 	pte_t *pte = NULL;
68639e5fc74cSSteve Capper 
68649e5fc74cSSteve Capper 	pgd = pgd_offset(mm, addr);
6865f4f0a3d8SKirill A. Shutemov 	p4d = p4d_alloc(mm, pgd, addr);
6866f4f0a3d8SKirill A. Shutemov 	if (!p4d)
6867f4f0a3d8SKirill A. Shutemov 		return NULL;
6868c2febafcSKirill A. Shutemov 	pud = pud_alloc(mm, p4d, addr);
68699e5fc74cSSteve Capper 	if (pud) {
68709e5fc74cSSteve Capper 		if (sz == PUD_SIZE) {
68719e5fc74cSSteve Capper 			pte = (pte_t *)pud;
68729e5fc74cSSteve Capper 		} else {
68739e5fc74cSSteve Capper 			BUG_ON(sz != PMD_SIZE);
6874c1991e07SPeter Xu 			if (want_pmd_share(vma, addr) && pud_none(*pud))
6875aec44e0fSPeter Xu 				pte = huge_pmd_share(mm, vma, addr, pud);
68769e5fc74cSSteve Capper 			else
68779e5fc74cSSteve Capper 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
68789e5fc74cSSteve Capper 		}
68799e5fc74cSSteve Capper 	}
68804e666314SMichal Hocko 	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
68819e5fc74cSSteve Capper 
68829e5fc74cSSteve Capper 	return pte;
68839e5fc74cSSteve Capper }
68849e5fc74cSSteve Capper 
68859b19df29SPunit Agrawal /*
68869b19df29SPunit Agrawal  * huge_pte_offset() - Walk the page table to resolve the hugepage
68879b19df29SPunit Agrawal  * entry at address @addr
68889b19df29SPunit Agrawal  *
68898ac0b81aSLi Xinhai  * Return: Pointer to page table entry (PUD or PMD) for
68908ac0b81aSLi Xinhai  * address @addr, or NULL if a !p*d_present() entry is encountered and the
68919b19df29SPunit Agrawal  * size @sz doesn't match the hugepage size at this level of the page
68929b19df29SPunit Agrawal  * table.
68939b19df29SPunit Agrawal  */
68947868a208SPunit Agrawal pte_t *huge_pte_offset(struct mm_struct *mm,
68957868a208SPunit Agrawal 		       unsigned long addr, unsigned long sz)
68969e5fc74cSSteve Capper {
68979e5fc74cSSteve Capper 	pgd_t *pgd;
6898c2febafcSKirill A. Shutemov 	p4d_t *p4d;
68998ac0b81aSLi Xinhai 	pud_t *pud;
69008ac0b81aSLi Xinhai 	pmd_t *pmd;
69019e5fc74cSSteve Capper 
69029e5fc74cSSteve Capper 	pgd = pgd_offset(mm, addr);
6903c2febafcSKirill A. Shutemov 	if (!pgd_present(*pgd))
6904c2febafcSKirill A. Shutemov 		return NULL;
6905c2febafcSKirill A. Shutemov 	p4d = p4d_offset(pgd, addr);
6906c2febafcSKirill A. Shutemov 	if (!p4d_present(*p4d))
6907c2febafcSKirill A. Shutemov 		return NULL;
69089b19df29SPunit Agrawal 
6909c2febafcSKirill A. Shutemov 	pud = pud_offset(p4d, addr);
69108ac0b81aSLi Xinhai 	if (sz == PUD_SIZE)
69118ac0b81aSLi Xinhai 		/* must be pud huge, non-present or none */
69129e5fc74cSSteve Capper 		return (pte_t *)pud;
69138ac0b81aSLi Xinhai 	if (!pud_present(*pud))
69148ac0b81aSLi Xinhai 		return NULL;
69158ac0b81aSLi Xinhai 	/* must have a valid entry and size to go further */
69169b19df29SPunit Agrawal 
69179e5fc74cSSteve Capper 	pmd = pmd_offset(pud, addr);
69188ac0b81aSLi Xinhai 	/* must be pmd huge, non-present or none */
69199e5fc74cSSteve Capper 	return (pte_t *)pmd;
69209e5fc74cSSteve Capper }
69219e5fc74cSSteve Capper 
6922e95a9851SMike Kravetz /*
6923e95a9851SMike Kravetz  * Return a mask that can be used to advance an address to the last huge
6924e95a9851SMike Kravetz  * page in the range mapped by a single page table page.  Used to skip
6925e95a9851SMike Kravetz  * non-present page table entries when linearly scanning address ranges.
6926e95a9851SMike Kravetz  * Architectures with unique huge page to page table relationships can
6927e95a9851SMike Kravetz  * define their own version of this routine.
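 *
 * For example, with PMD-sized huge pages the mask is PUD_SIZE - PMD_SIZE:
 * when a scan finds no page table at the current address it can do
 * "address |= mask" so that the next iteration's huge_page_size() step
 * moves it to the start of the following PUD range.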
6928e95a9851SMike Kravetz  */
6929e95a9851SMike Kravetz unsigned long hugetlb_mask_last_page(struct hstate *h)
6930e95a9851SMike Kravetz {
6931e95a9851SMike Kravetz 	unsigned long hp_size = huge_page_size(h);
6932e95a9851SMike Kravetz 
6933e95a9851SMike Kravetz 	if (hp_size == PUD_SIZE)
6934e95a9851SMike Kravetz 		return P4D_SIZE - PUD_SIZE;
6935e95a9851SMike Kravetz 	else if (hp_size == PMD_SIZE)
6936e95a9851SMike Kravetz 		return PUD_SIZE - PMD_SIZE;
6937e95a9851SMike Kravetz 	else
6938e95a9851SMike Kravetz 		return 0UL;
6939e95a9851SMike Kravetz }
6940e95a9851SMike Kravetz 
6941e95a9851SMike Kravetz #else
6942e95a9851SMike Kravetz 
6943e95a9851SMike Kravetz /* See description above.  Architectures can provide their own version. */
6944e95a9851SMike Kravetz __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
6945e95a9851SMike Kravetz {
69464ddb4d91SMike Kravetz #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
69474ddb4d91SMike Kravetz 	if (huge_page_size(h) == PMD_SIZE)
69484ddb4d91SMike Kravetz 		return PUD_SIZE - PMD_SIZE;
69494ddb4d91SMike Kravetz #endif
6950e95a9851SMike Kravetz 	return 0UL;
6951e95a9851SMike Kravetz }
6952e95a9851SMike Kravetz 
695361f77edaSNaoya Horiguchi #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
695461f77edaSNaoya Horiguchi 
695561f77edaSNaoya Horiguchi /*
695661f77edaSNaoya Horiguchi  * These functions are overwritable if your architecture needs its own
695761f77edaSNaoya Horiguchi  * behavior.
695861f77edaSNaoya Horiguchi  */
695961f77edaSNaoya Horiguchi struct page * __weak
696061f77edaSNaoya Horiguchi follow_huge_addr(struct mm_struct *mm, unsigned long address,
696161f77edaSNaoya Horiguchi 			      int write)
696261f77edaSNaoya Horiguchi {
696361f77edaSNaoya Horiguchi 	return ERR_PTR(-EINVAL);
696461f77edaSNaoya Horiguchi }
696561f77edaSNaoya Horiguchi 
696661f77edaSNaoya Horiguchi struct page * __weak
69674dc71451SAneesh Kumar K.V follow_huge_pd(struct vm_area_struct *vma,
69684dc71451SAneesh Kumar K.V 	       unsigned long address, hugepd_t hpd, int flags, int pdshift)
69694dc71451SAneesh Kumar K.V {
69704dc71451SAneesh Kumar K.V 	WARN(1, "hugepd follow called with no support for hugepage directory format\n");
69714dc71451SAneesh Kumar K.V 	return NULL;
69724dc71451SAneesh Kumar K.V }
69734dc71451SAneesh Kumar K.V 
69744dc71451SAneesh Kumar K.V struct page * __weak
69759e5fc74cSSteve Capper follow_huge_pmd(struct mm_struct *mm, unsigned long address,
6976e66f17ffSNaoya Horiguchi 		pmd_t *pmd, int flags)
69779e5fc74cSSteve Capper {
6978e66f17ffSNaoya Horiguchi 	struct page *page = NULL;
6979e66f17ffSNaoya Horiguchi 	spinlock_t *ptl;
6980c9d398faSNaoya Horiguchi 	pte_t pte;
69813faa52c0SJohn Hubbard 
69828909691bSDavid Hildenbrand 	/*
69838909691bSDavid Hildenbrand 	 * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
69848909691bSDavid Hildenbrand 	 * follow_hugetlb_page().
69858909691bSDavid Hildenbrand 	 */
69868909691bSDavid Hildenbrand 	if (WARN_ON_ONCE(flags & FOLL_PIN))
69873faa52c0SJohn Hubbard 		return NULL;
69883faa52c0SJohn Hubbard 
6989e66f17ffSNaoya Horiguchi retry:
6990e66f17ffSNaoya Horiguchi 	ptl = pmd_lockptr(mm, pmd);
6991e66f17ffSNaoya Horiguchi 	spin_lock(ptl);
6992e66f17ffSNaoya Horiguchi 	/*
6993e66f17ffSNaoya Horiguchi 	 * make sure that the address range covered by this pmd is not
6994e66f17ffSNaoya Horiguchi 	 * unmapped by other threads.
6995e66f17ffSNaoya Horiguchi 	 */
6996e66f17ffSNaoya Horiguchi 	if (!pmd_huge(*pmd))
6997e66f17ffSNaoya Horiguchi 		goto out;
6998c9d398faSNaoya Horiguchi 	pte = huge_ptep_get((pte_t *)pmd);
6999c9d398faSNaoya Horiguchi 	if (pte_present(pte)) {
700097534127SGerald Schaefer 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
70013faa52c0SJohn Hubbard 		/*
70023faa52c0SJohn Hubbard 		 * try_grab_page() should always succeed here, because: a) we
70033faa52c0SJohn Hubbard 		 * hold the pmd (ptl) lock, and b) we've just checked that the
70043faa52c0SJohn Hubbard 		 * huge pmd (head) page is present in the page tables. The ptl
70053faa52c0SJohn Hubbard 		 * prevents the head page and tail pages from being rearranged
70063faa52c0SJohn Hubbard 		 * in any way. So this page must be available at this point,
70073faa52c0SJohn Hubbard 		 * unless the page refcount overflowed:
70083faa52c0SJohn Hubbard 		 */
70093faa52c0SJohn Hubbard 		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
70103faa52c0SJohn Hubbard 			page = NULL;
70113faa52c0SJohn Hubbard 			goto out;
70123faa52c0SJohn Hubbard 		}
7013e66f17ffSNaoya Horiguchi 	} else {
7014c9d398faSNaoya Horiguchi 		if (is_hugetlb_entry_migration(pte)) {
7015e66f17ffSNaoya Horiguchi 			spin_unlock(ptl);
7016ad1ac596SMiaohe Lin 			__migration_entry_wait_huge((pte_t *)pmd, ptl);
7017e66f17ffSNaoya Horiguchi 			goto retry;
7018e66f17ffSNaoya Horiguchi 		}
7019e66f17ffSNaoya Horiguchi 		/*
7020e66f17ffSNaoya Horiguchi 		 * hwpoisoned entry is treated as no_page_table in
7021e66f17ffSNaoya Horiguchi 		 * follow_page_mask().
7022e66f17ffSNaoya Horiguchi 		 */
7023e66f17ffSNaoya Horiguchi 	}
7024e66f17ffSNaoya Horiguchi out:
7025e66f17ffSNaoya Horiguchi 	spin_unlock(ptl);
70269e5fc74cSSteve Capper 	return page;
70279e5fc74cSSteve Capper }
70289e5fc74cSSteve Capper 
702961f77edaSNaoya Horiguchi struct page * __weak
70309e5fc74cSSteve Capper follow_huge_pud(struct mm_struct *mm, unsigned long address,
7031e66f17ffSNaoya Horiguchi 		pud_t *pud, int flags)
70329e5fc74cSSteve Capper {
70333a194f3fSNaoya Horiguchi 	struct page *page = NULL;
70343a194f3fSNaoya Horiguchi 	spinlock_t *ptl;
70353a194f3fSNaoya Horiguchi 	pte_t pte;
70363a194f3fSNaoya Horiguchi 
70373a194f3fSNaoya Horiguchi 	if (WARN_ON_ONCE(flags & FOLL_PIN))
7038e66f17ffSNaoya Horiguchi 		return NULL;
70399e5fc74cSSteve Capper 
70403a194f3fSNaoya Horiguchi retry:
70413a194f3fSNaoya Horiguchi 	ptl = huge_pte_lock(hstate_sizelog(PUD_SHIFT), mm, (pte_t *)pud);
70423a194f3fSNaoya Horiguchi 	if (!pud_huge(*pud))
70433a194f3fSNaoya Horiguchi 		goto out;
70443a194f3fSNaoya Horiguchi 	pte = huge_ptep_get((pte_t *)pud);
70453a194f3fSNaoya Horiguchi 	if (pte_present(pte)) {
70463a194f3fSNaoya Horiguchi 		page = pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
70473a194f3fSNaoya Horiguchi 		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
70483a194f3fSNaoya Horiguchi 			page = NULL;
70493a194f3fSNaoya Horiguchi 			goto out;
70503a194f3fSNaoya Horiguchi 		}
70513a194f3fSNaoya Horiguchi 	} else {
70523a194f3fSNaoya Horiguchi 		if (is_hugetlb_entry_migration(pte)) {
70533a194f3fSNaoya Horiguchi 			spin_unlock(ptl);
70543a194f3fSNaoya Horiguchi 			__migration_entry_wait(mm, (pte_t *)pud, ptl);
70553a194f3fSNaoya Horiguchi 			goto retry;
70563a194f3fSNaoya Horiguchi 		}
70573a194f3fSNaoya Horiguchi 		/*
70583a194f3fSNaoya Horiguchi 		 * hwpoisoned entry is treated as no_page_table in
70593a194f3fSNaoya Horiguchi 		 * follow_page_mask().
70603a194f3fSNaoya Horiguchi 		 */
70613a194f3fSNaoya Horiguchi 	}
70623a194f3fSNaoya Horiguchi out:
70633a194f3fSNaoya Horiguchi 	spin_unlock(ptl);
70643a194f3fSNaoya Horiguchi 	return page;
70659e5fc74cSSteve Capper }
70669e5fc74cSSteve Capper 
7067faaa5b62SAnshuman Khandual struct page * __weak
7068faaa5b62SAnshuman Khandual follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
7069faaa5b62SAnshuman Khandual {
70703faa52c0SJohn Hubbard 	if (flags & (FOLL_GET | FOLL_PIN))
7071faaa5b62SAnshuman Khandual 		return NULL;
7072faaa5b62SAnshuman Khandual 
7073faaa5b62SAnshuman Khandual 	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
7074faaa5b62SAnshuman Khandual }
7075faaa5b62SAnshuman Khandual 
70767ce82f4cSMiaohe Lin int isolate_hugetlb(struct page *page, struct list_head *list)
707731caf665SNaoya Horiguchi {
70787ce82f4cSMiaohe Lin 	int ret = 0;
7079bcc54222SNaoya Horiguchi 
7080db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
70818f251a3dSMike Kravetz 	if (!PageHeadHuge(page) ||
70828f251a3dSMike Kravetz 	    !HPageMigratable(page) ||
70830eb2df2bSMuchun Song 	    !get_page_unless_zero(page)) {
70847ce82f4cSMiaohe Lin 		ret = -EBUSY;
7085bcc54222SNaoya Horiguchi 		goto unlock;
7086bcc54222SNaoya Horiguchi 	}
70878f251a3dSMike Kravetz 	ClearHPageMigratable(page);
708831caf665SNaoya Horiguchi 	list_move_tail(&page->lru, list);
7089bcc54222SNaoya Horiguchi unlock:
7090db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
7091bcc54222SNaoya Horiguchi 	return ret;
709231caf665SNaoya Horiguchi }
709331caf665SNaoya Horiguchi 
709425182f05SNaoya Horiguchi int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
709525182f05SNaoya Horiguchi {
709625182f05SNaoya Horiguchi 	int ret = 0;
709725182f05SNaoya Horiguchi 
709825182f05SNaoya Horiguchi 	*hugetlb = false;
709925182f05SNaoya Horiguchi 	spin_lock_irq(&hugetlb_lock);
710025182f05SNaoya Horiguchi 	if (PageHeadHuge(page)) {
710125182f05SNaoya Horiguchi 		*hugetlb = true;
7102b283d983SNaoya Horiguchi 		if (HPageFreed(page))
7103b283d983SNaoya Horiguchi 			ret = 0;
7104b283d983SNaoya Horiguchi 		else if (HPageMigratable(page))
710525182f05SNaoya Horiguchi 			ret = get_page_unless_zero(page);
71060ed950d1SNaoya Horiguchi 		else
71070ed950d1SNaoya Horiguchi 			ret = -EBUSY;
710825182f05SNaoya Horiguchi 	}
710925182f05SNaoya Horiguchi 	spin_unlock_irq(&hugetlb_lock);
711025182f05SNaoya Horiguchi 	return ret;
711125182f05SNaoya Horiguchi }
711225182f05SNaoya Horiguchi 
7113405ce051SNaoya Horiguchi int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
7114405ce051SNaoya Horiguchi {
7115405ce051SNaoya Horiguchi 	int ret;
7116405ce051SNaoya Horiguchi 
7117405ce051SNaoya Horiguchi 	spin_lock_irq(&hugetlb_lock);
7118405ce051SNaoya Horiguchi 	ret = __get_huge_page_for_hwpoison(pfn, flags);
7119405ce051SNaoya Horiguchi 	spin_unlock_irq(&hugetlb_lock);
7120405ce051SNaoya Horiguchi 	return ret;
7121405ce051SNaoya Horiguchi }
7122405ce051SNaoya Horiguchi 
712331caf665SNaoya Horiguchi void putback_active_hugepage(struct page *page)
712431caf665SNaoya Horiguchi {
7125db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
71268f251a3dSMike Kravetz 	SetHPageMigratable(page);
712731caf665SNaoya Horiguchi 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
7128db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
712931caf665SNaoya Horiguchi 	put_page(page);
713031caf665SNaoya Horiguchi }
7131ab5ac90aSMichal Hocko 
7132ab5ac90aSMichal Hocko void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
7133ab5ac90aSMichal Hocko {
7134ab5ac90aSMichal Hocko 	struct hstate *h = page_hstate(oldpage);
7135ab5ac90aSMichal Hocko 
7136ab5ac90aSMichal Hocko 	hugetlb_cgroup_migrate(oldpage, newpage);
7137ab5ac90aSMichal Hocko 	set_page_owner_migrate_reason(newpage, reason);
7138ab5ac90aSMichal Hocko 
7139ab5ac90aSMichal Hocko 	/*
7140ab5ac90aSMichal Hocko 	 * Transfer the temporary state of the new huge page. This is the
7141ab5ac90aSMichal Hocko 	 * reverse of other transitions because the newpage is going to
7142ab5ac90aSMichal Hocko 	 * be final while the old one will be freed, so it takes over
7143ab5ac90aSMichal Hocko 	 * the temporary status.
7144ab5ac90aSMichal Hocko 	 *
7145ab5ac90aSMichal Hocko 	 * Also note that we have to transfer the per-node surplus state
7146ab5ac90aSMichal Hocko 	 * here as well otherwise the global surplus count will not match
7147ab5ac90aSMichal Hocko 	 * the per-node's.
7148ab5ac90aSMichal Hocko 	 */
71499157c311SMike Kravetz 	if (HPageTemporary(newpage)) {
7150ab5ac90aSMichal Hocko 		int old_nid = page_to_nid(oldpage);
7151ab5ac90aSMichal Hocko 		int new_nid = page_to_nid(newpage);
7152ab5ac90aSMichal Hocko 
71539157c311SMike Kravetz 		SetHPageTemporary(oldpage);
71549157c311SMike Kravetz 		ClearHPageTemporary(newpage);
7155ab5ac90aSMichal Hocko 
71565af1ab1dSMiaohe Lin 		/*
71575af1ab1dSMiaohe Lin 		 * There is no need to transfer the per-node surplus state
71585af1ab1dSMiaohe Lin 		 * when we do not cross the node.
71595af1ab1dSMiaohe Lin 		 */
71605af1ab1dSMiaohe Lin 		if (new_nid == old_nid)
71615af1ab1dSMiaohe Lin 			return;
7162db71ef79SMike Kravetz 		spin_lock_irq(&hugetlb_lock);
7163ab5ac90aSMichal Hocko 		if (h->surplus_huge_pages_node[old_nid]) {
7164ab5ac90aSMichal Hocko 			h->surplus_huge_pages_node[old_nid]--;
7165ab5ac90aSMichal Hocko 			h->surplus_huge_pages_node[new_nid]++;
7166ab5ac90aSMichal Hocko 		}
7167db71ef79SMike Kravetz 		spin_unlock_irq(&hugetlb_lock);
7168ab5ac90aSMichal Hocko 	}
7169ab5ac90aSMichal Hocko }
7170cf11e85fSRoman Gushchin 
71716dfeaff9SPeter Xu /*
71726dfeaff9SPeter Xu  * This function will unconditionally remove all the shared pmd pgtable entries
71736dfeaff9SPeter Xu  * within the specific vma for a hugetlbfs memory range.
71746dfeaff9SPeter Xu  */
71756dfeaff9SPeter Xu void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
71766dfeaff9SPeter Xu {
71776dfeaff9SPeter Xu 	struct hstate *h = hstate_vma(vma);
71786dfeaff9SPeter Xu 	unsigned long sz = huge_page_size(h);
71796dfeaff9SPeter Xu 	struct mm_struct *mm = vma->vm_mm;
71806dfeaff9SPeter Xu 	struct mmu_notifier_range range;
71816dfeaff9SPeter Xu 	unsigned long address, start, end;
71826dfeaff9SPeter Xu 	spinlock_t *ptl;
71836dfeaff9SPeter Xu 	pte_t *ptep;
71846dfeaff9SPeter Xu 
71856dfeaff9SPeter Xu 	if (!(vma->vm_flags & VM_MAYSHARE))
71866dfeaff9SPeter Xu 		return;
71876dfeaff9SPeter Xu 
71886dfeaff9SPeter Xu 	start = ALIGN(vma->vm_start, PUD_SIZE);
71896dfeaff9SPeter Xu 	end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
71906dfeaff9SPeter Xu 
71916dfeaff9SPeter Xu 	if (start >= end)
71926dfeaff9SPeter Xu 		return;
71936dfeaff9SPeter Xu 
71949c8bbfacSBaolin Wang 	flush_cache_range(vma, start, end);
71956dfeaff9SPeter Xu 	/*
71966dfeaff9SPeter Xu 	 * No need to call adjust_range_if_pmd_sharing_possible(), because
71976dfeaff9SPeter Xu 	 * we have already done the PUD_SIZE alignment.
71986dfeaff9SPeter Xu 	 */
71996dfeaff9SPeter Xu 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
72006dfeaff9SPeter Xu 				start, end);
72016dfeaff9SPeter Xu 	mmu_notifier_invalidate_range_start(&range);
72026dfeaff9SPeter Xu 	i_mmap_lock_write(vma->vm_file->f_mapping);
72036dfeaff9SPeter Xu 	for (address = start; address < end; address += PUD_SIZE) {
72046dfeaff9SPeter Xu 		ptep = huge_pte_offset(mm, address, sz);
72056dfeaff9SPeter Xu 		if (!ptep)
72066dfeaff9SPeter Xu 			continue;
72076dfeaff9SPeter Xu 		ptl = huge_pte_lock(h, mm, ptep);
72084ddb4d91SMike Kravetz 		huge_pmd_unshare(mm, vma, address, ptep);
72096dfeaff9SPeter Xu 		spin_unlock(ptl);
72106dfeaff9SPeter Xu 	}
72116dfeaff9SPeter Xu 	flush_hugetlb_tlb_range(vma, start, end);
72126dfeaff9SPeter Xu 	i_mmap_unlock_write(vma->vm_file->f_mapping);
72136dfeaff9SPeter Xu 	/*
72146dfeaff9SPeter Xu 	 * No need to call mmu_notifier_invalidate_range(), see
7215ee65728eSMike Rapoport 	 * Documentation/mm/mmu_notifier.rst.
72166dfeaff9SPeter Xu 	 */
72176dfeaff9SPeter Xu 	mmu_notifier_invalidate_range_end(&range);
72186dfeaff9SPeter Xu }
72196dfeaff9SPeter Xu 
7220cf11e85fSRoman Gushchin #ifdef CONFIG_CMA
7221cf11e85fSRoman Gushchin static bool cma_reserve_called __initdata;
7222cf11e85fSRoman Gushchin 
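/*
 * "hugetlb_cma=" accepts either one global size, e.g. "hugetlb_cma=4G",
 * which is spread across online nodes in hugetlb_cma_reserve(), or a
 * per-node list such as "hugetlb_cma=0:1G,2:2G" that reserves the given
 * amount on each listed node.
 */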
7223cf11e85fSRoman Gushchin static int __init cmdline_parse_hugetlb_cma(char *p)
7224cf11e85fSRoman Gushchin {
722538e719abSBaolin Wang 	int nid, count = 0;
722638e719abSBaolin Wang 	unsigned long tmp;
722738e719abSBaolin Wang 	char *s = p;
722838e719abSBaolin Wang 
722938e719abSBaolin Wang 	while (*s) {
723038e719abSBaolin Wang 		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
723138e719abSBaolin Wang 			break;
723238e719abSBaolin Wang 
723338e719abSBaolin Wang 		if (s[count] == ':') {
7234f9317f77SMike Kravetz 			if (tmp >= MAX_NUMNODES)
723538e719abSBaolin Wang 				break;
7236f9317f77SMike Kravetz 			nid = array_index_nospec(tmp, MAX_NUMNODES);
723738e719abSBaolin Wang 
723838e719abSBaolin Wang 			s += count + 1;
723938e719abSBaolin Wang 			tmp = memparse(s, &s);
724038e719abSBaolin Wang 			hugetlb_cma_size_in_node[nid] = tmp;
724138e719abSBaolin Wang 			hugetlb_cma_size += tmp;
724238e719abSBaolin Wang 
724338e719abSBaolin Wang 			/*
724438e719abSBaolin Wang 			 * Skip the separator if we have one, otherwise
724538e719abSBaolin Wang 			 * stop parsing.
724638e719abSBaolin Wang 			 */
724738e719abSBaolin Wang 			if (*s == ',')
724838e719abSBaolin Wang 				s++;
724938e719abSBaolin Wang 			else
725038e719abSBaolin Wang 				break;
725138e719abSBaolin Wang 		} else {
7252cf11e85fSRoman Gushchin 			hugetlb_cma_size = memparse(p, &p);
725338e719abSBaolin Wang 			break;
725438e719abSBaolin Wang 		}
725538e719abSBaolin Wang 	}
725638e719abSBaolin Wang 
7257cf11e85fSRoman Gushchin 	return 0;
7258cf11e85fSRoman Gushchin }
7259cf11e85fSRoman Gushchin 
7260cf11e85fSRoman Gushchin early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
7261cf11e85fSRoman Gushchin 
7262cf11e85fSRoman Gushchin void __init hugetlb_cma_reserve(int order)
7263cf11e85fSRoman Gushchin {
7264cf11e85fSRoman Gushchin 	unsigned long size, reserved, per_node;
726538e719abSBaolin Wang 	bool node_specific_cma_alloc = false;
7266cf11e85fSRoman Gushchin 	int nid;
7267cf11e85fSRoman Gushchin 
7268cf11e85fSRoman Gushchin 	cma_reserve_called = true;
7269cf11e85fSRoman Gushchin 
7270cf11e85fSRoman Gushchin 	if (!hugetlb_cma_size)
7271cf11e85fSRoman Gushchin 		return;
7272cf11e85fSRoman Gushchin 
727338e719abSBaolin Wang 	for (nid = 0; nid < MAX_NUMNODES; nid++) {
727438e719abSBaolin Wang 		if (hugetlb_cma_size_in_node[nid] == 0)
727538e719abSBaolin Wang 			continue;
727638e719abSBaolin Wang 
727730a51400SPeng Liu 		if (!node_online(nid)) {
727838e719abSBaolin Wang 			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
727938e719abSBaolin Wang 			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
728038e719abSBaolin Wang 			hugetlb_cma_size_in_node[nid] = 0;
728138e719abSBaolin Wang 			continue;
728238e719abSBaolin Wang 		}
728338e719abSBaolin Wang 
728438e719abSBaolin Wang 		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
728538e719abSBaolin Wang 			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
728638e719abSBaolin Wang 				nid, (PAGE_SIZE << order) / SZ_1M);
728738e719abSBaolin Wang 			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
728838e719abSBaolin Wang 			hugetlb_cma_size_in_node[nid] = 0;
728938e719abSBaolin Wang 		} else {
729038e719abSBaolin Wang 			node_specific_cma_alloc = true;
729138e719abSBaolin Wang 		}
729238e719abSBaolin Wang 	}
729338e719abSBaolin Wang 
729438e719abSBaolin Wang 	/* Validate the CMA size again in case some invalid nodes were specified. */
729538e719abSBaolin Wang 	if (!hugetlb_cma_size)
729638e719abSBaolin Wang 		return;
729738e719abSBaolin Wang 
7298cf11e85fSRoman Gushchin 	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
7299cf11e85fSRoman Gushchin 		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
7300cf11e85fSRoman Gushchin 			(PAGE_SIZE << order) / SZ_1M);
7301a01f4390SMike Kravetz 		hugetlb_cma_size = 0;
7302cf11e85fSRoman Gushchin 		return;
7303cf11e85fSRoman Gushchin 	}
7304cf11e85fSRoman Gushchin 
730538e719abSBaolin Wang 	if (!node_specific_cma_alloc) {
7306cf11e85fSRoman Gushchin 		/*
7307cf11e85fSRoman Gushchin 		 * If a 3 GB area is requested on a machine with 4 NUMA nodes,
7308cf11e85fSRoman Gushchin 		 * allocate 1 GB on each of the first three nodes and ignore the last one.
7309cf11e85fSRoman Gushchin 		 */
7310cf11e85fSRoman Gushchin 		per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
7311cf11e85fSRoman Gushchin 		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
7312cf11e85fSRoman Gushchin 			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
731338e719abSBaolin Wang 	}
7314cf11e85fSRoman Gushchin 
7315cf11e85fSRoman Gushchin 	reserved = 0;
731630a51400SPeng Liu 	for_each_online_node(nid) {
7317cf11e85fSRoman Gushchin 		int res;
73182281f797SBarry Song 		char name[CMA_MAX_NAME];
7319cf11e85fSRoman Gushchin 
732038e719abSBaolin Wang 		if (node_specific_cma_alloc) {
732138e719abSBaolin Wang 			if (hugetlb_cma_size_in_node[nid] == 0)
732238e719abSBaolin Wang 				continue;
732338e719abSBaolin Wang 
732438e719abSBaolin Wang 			size = hugetlb_cma_size_in_node[nid];
732538e719abSBaolin Wang 		} else {
7326cf11e85fSRoman Gushchin 			size = min(per_node, hugetlb_cma_size - reserved);
732738e719abSBaolin Wang 		}
732838e719abSBaolin Wang 
7329cf11e85fSRoman Gushchin 		size = round_up(size, PAGE_SIZE << order);
7330cf11e85fSRoman Gushchin 
73312281f797SBarry Song 		snprintf(name, sizeof(name), "hugetlb%d", nid);
7332a01f4390SMike Kravetz 		/*
7333a01f4390SMike Kravetz 		 * Note that 'order per bit' is based on the smallest size that
7334a01f4390SMike Kravetz 		 * may be returned to the CMA allocator in the case of
7335a01f4390SMike Kravetz 		 * huge page demotion.
7336a01f4390SMike Kravetz 		 */
7337a01f4390SMike Kravetz 		res = cma_declare_contiguous_nid(0, size, 0,
7338a01f4390SMike Kravetz 						PAGE_SIZE << HUGETLB_PAGE_ORDER,
733929d0f41dSBarry Song 						 0, false, name,
7340cf11e85fSRoman Gushchin 						 &hugetlb_cma[nid], nid);
7341cf11e85fSRoman Gushchin 		if (res) {
7342cf11e85fSRoman Gushchin 			pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
7343cf11e85fSRoman Gushchin 				res, nid);
7344cf11e85fSRoman Gushchin 			continue;
7345cf11e85fSRoman Gushchin 		}
7346cf11e85fSRoman Gushchin 
7347cf11e85fSRoman Gushchin 		reserved += size;
7348cf11e85fSRoman Gushchin 		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
7349cf11e85fSRoman Gushchin 			size / SZ_1M, nid);
7350cf11e85fSRoman Gushchin 
7351cf11e85fSRoman Gushchin 		if (reserved >= hugetlb_cma_size)
7352cf11e85fSRoman Gushchin 			break;
7353cf11e85fSRoman Gushchin 	}
7354a01f4390SMike Kravetz 
7355a01f4390SMike Kravetz 	if (!reserved)
7356a01f4390SMike Kravetz 		/*
7357a01f4390SMike Kravetz 		 * hugetlb_cma_size is used to determine if allocations from
7358a01f4390SMike Kravetz 		 * cma are possible.  Set to zero if no cma regions are set up.
7359a01f4390SMike Kravetz 		 */
7360a01f4390SMike Kravetz 		hugetlb_cma_size = 0;
7361cf11e85fSRoman Gushchin }
7362cf11e85fSRoman Gushchin 
7363263b8998SMiaohe Lin static void __init hugetlb_cma_check(void)
7364cf11e85fSRoman Gushchin {
7365cf11e85fSRoman Gushchin 	if (!hugetlb_cma_size || cma_reserve_called)
7366cf11e85fSRoman Gushchin 		return;
7367cf11e85fSRoman Gushchin 
7368cf11e85fSRoman Gushchin 	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
7369cf11e85fSRoman Gushchin }
7370cf11e85fSRoman Gushchin 
7371cf11e85fSRoman Gushchin #endif /* CONFIG_CMA */
7372