xref: /openbmc/linux/mm/hugetlb.c (revision 3a47c54f)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Generic hugetlb support.
46d49e352SNadia Yvette Chambers  * (C) Nadia Yvette Chambers, April 2004
51da177e4SLinus Torvalds  */
61da177e4SLinus Torvalds #include <linux/list.h>
71da177e4SLinus Torvalds #include <linux/init.h>
81da177e4SLinus Torvalds #include <linux/mm.h>
9e1759c21SAlexey Dobriyan #include <linux/seq_file.h>
101da177e4SLinus Torvalds #include <linux/sysctl.h>
111da177e4SLinus Torvalds #include <linux/highmem.h>
12cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h>
131da177e4SLinus Torvalds #include <linux/nodemask.h>
1463551ae0SDavid Gibson #include <linux/pagemap.h>
155da7ca86SChristoph Lameter #include <linux/mempolicy.h>
163b32123dSGideon Israel Dsouza #include <linux/compiler.h>
17aea47ff3SChristoph Lameter #include <linux/cpuset.h>
183935baa9SDavid Gibson #include <linux/mutex.h>
1997ad1087SMike Rapoport #include <linux/memblock.h>
20a3437870SNishanth Aravamudan #include <linux/sysfs.h>
215a0e3ad6STejun Heo #include <linux/slab.h>
22bbe88753SJoonsoo Kim #include <linux/sched/mm.h>
2363489f8eSMike Kravetz #include <linux/mmdebug.h>
24174cd4b1SIngo Molnar #include <linux/sched/signal.h>
250fe6e20bSNaoya Horiguchi #include <linux/rmap.h>
26c6247f72SMatthew Wilcox #include <linux/string_helpers.h>
27fd6a03edSNaoya Horiguchi #include <linux/swap.h>
28fd6a03edSNaoya Horiguchi #include <linux/swapops.h>
298382d914SDavidlohr Bueso #include <linux/jhash.h>
3098fa15f3SAnshuman Khandual #include <linux/numa.h>
31c77c0a8aSWaiman Long #include <linux/llist.h>
32cf11e85fSRoman Gushchin #include <linux/cma.h>
338cc5fcbbSMina Almasry #include <linux/migrate.h>
34f9317f77SMike Kravetz #include <linux/nospec.h>
35662ce1dcSYang Yang #include <linux/delayacct.h>
36b958d4d0SMuchun Song #include <linux/memory.h>
37d6606683SLinus Torvalds 
3863551ae0SDavid Gibson #include <asm/page.h>
39ca15ca40SMike Rapoport #include <asm/pgalloc.h>
4024669e58SAneesh Kumar K.V #include <asm/tlb.h>
4163551ae0SDavid Gibson 
4224669e58SAneesh Kumar K.V #include <linux/io.h>
4363551ae0SDavid Gibson #include <linux/hugetlb.h>
449dd540e2SAneesh Kumar K.V #include <linux/hugetlb_cgroup.h>
459a305230SLee Schermerhorn #include <linux/node.h>
46ab5ac90aSMichal Hocko #include <linux/page_owner.h>
477835e98bSNick Piggin #include "internal.h"
48f41f2ed4SMuchun Song #include "hugetlb_vmemmap.h"
491da177e4SLinus Torvalds 
50c3f38a38SAneesh Kumar K.V int hugetlb_max_hstate __read_mostly;
51e5ff2159SAndi Kleen unsigned int default_hstate_idx;
52e5ff2159SAndi Kleen struct hstate hstates[HUGE_MAX_HSTATE];
53cf11e85fSRoman Gushchin 
54dbda8feaSBarry Song #ifdef CONFIG_CMA
55cf11e85fSRoman Gushchin static struct cma *hugetlb_cma[MAX_NUMNODES];
5638e719abSBaolin Wang static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
57a01f4390SMike Kravetz static bool hugetlb_cma_page(struct page *page, unsigned int order)
58a01f4390SMike Kravetz {
59a01f4390SMike Kravetz 	return cma_pages_valid(hugetlb_cma[page_to_nid(page)], page,
60a01f4390SMike Kravetz 				1 << order);
61a01f4390SMike Kravetz }
62a01f4390SMike Kravetz #else
63a01f4390SMike Kravetz static bool hugetlb_cma_page(struct page *page, unsigned int order)
64a01f4390SMike Kravetz {
65a01f4390SMike Kravetz 	return false;
66a01f4390SMike Kravetz }
67dbda8feaSBarry Song #endif
68dbda8feaSBarry Song static unsigned long hugetlb_cma_size __initdata;
69cf11e85fSRoman Gushchin 
7053ba51d2SJon Tollefson __initdata LIST_HEAD(huge_boot_pages);
7153ba51d2SJon Tollefson 
72e5ff2159SAndi Kleen /* for command line parsing */
73e5ff2159SAndi Kleen static struct hstate * __initdata parsed_hstate;
74e5ff2159SAndi Kleen static unsigned long __initdata default_hstate_max_huge_pages;
759fee021dSVaishali Thakkar static bool __initdata parsed_valid_hugepagesz = true;
76282f4214SMike Kravetz static bool __initdata parsed_default_hugepagesz;
77b5389086SZhenguo Yao static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
78e5ff2159SAndi Kleen 
793935baa9SDavid Gibson /*
8031caf665SNaoya Horiguchi  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
8131caf665SNaoya Horiguchi  * free_huge_pages, and surplus_huge_pages.
823935baa9SDavid Gibson  */
83c3f38a38SAneesh Kumar K.V DEFINE_SPINLOCK(hugetlb_lock);
840bd0f9fbSEric Paris 
858382d914SDavidlohr Bueso /*
868382d914SDavidlohr Bueso  * Serializes faults on the same logical page.  This is used to
878382d914SDavidlohr Bueso  * prevent spurious OOMs when the hugepage pool is fully utilized.
888382d914SDavidlohr Bueso  */
898382d914SDavidlohr Bueso static int num_fault_mutexes;
90c672c7f2SMike Kravetz struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
918382d914SDavidlohr Bueso 
927ca02d0aSMike Kravetz /* Forward declaration */
937ca02d0aSMike Kravetz static int hugetlb_acct_memory(struct hstate *h, long delta);
947ca02d0aSMike Kravetz 
951d88433bSMiaohe Lin static inline bool subpool_is_free(struct hugepage_subpool *spool)
961d88433bSMiaohe Lin {
971d88433bSMiaohe Lin 	if (spool->count)
981d88433bSMiaohe Lin 		return false;
991d88433bSMiaohe Lin 	if (spool->max_hpages != -1)
1001d88433bSMiaohe Lin 		return spool->used_hpages == 0;
1011d88433bSMiaohe Lin 	if (spool->min_hpages != -1)
1021d88433bSMiaohe Lin 		return spool->rsv_hpages == spool->min_hpages;
1031d88433bSMiaohe Lin 
1041d88433bSMiaohe Lin 	return true;
1051d88433bSMiaohe Lin }
1061d88433bSMiaohe Lin 
107db71ef79SMike Kravetz static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
108db71ef79SMike Kravetz 						unsigned long irq_flags)
10990481622SDavid Gibson {
110db71ef79SMike Kravetz 	spin_unlock_irqrestore(&spool->lock, irq_flags);
11190481622SDavid Gibson 
11290481622SDavid Gibson 	/* If no pages are used, and no other handles to the subpool
1137c8de358SEthon Paul 	 * remain, give up any reservations based on minimum size and
1147ca02d0aSMike Kravetz 	 * free the subpool */
1151d88433bSMiaohe Lin 	if (subpool_is_free(spool)) {
1167ca02d0aSMike Kravetz 		if (spool->min_hpages != -1)
1177ca02d0aSMike Kravetz 			hugetlb_acct_memory(spool->hstate,
1187ca02d0aSMike Kravetz 						-spool->min_hpages);
11990481622SDavid Gibson 		kfree(spool);
12090481622SDavid Gibson 	}
1217ca02d0aSMike Kravetz }
12290481622SDavid Gibson 
1237ca02d0aSMike Kravetz struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
1247ca02d0aSMike Kravetz 						long min_hpages)
12590481622SDavid Gibson {
12690481622SDavid Gibson 	struct hugepage_subpool *spool;
12790481622SDavid Gibson 
128c6a91820SMike Kravetz 	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
12990481622SDavid Gibson 	if (!spool)
13090481622SDavid Gibson 		return NULL;
13190481622SDavid Gibson 
13290481622SDavid Gibson 	spin_lock_init(&spool->lock);
13390481622SDavid Gibson 	spool->count = 1;
1347ca02d0aSMike Kravetz 	spool->max_hpages = max_hpages;
1357ca02d0aSMike Kravetz 	spool->hstate = h;
1367ca02d0aSMike Kravetz 	spool->min_hpages = min_hpages;
1377ca02d0aSMike Kravetz 
1387ca02d0aSMike Kravetz 	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
1397ca02d0aSMike Kravetz 		kfree(spool);
1407ca02d0aSMike Kravetz 		return NULL;
1417ca02d0aSMike Kravetz 	}
1427ca02d0aSMike Kravetz 	spool->rsv_hpages = min_hpages;
14390481622SDavid Gibson 
14490481622SDavid Gibson 	return spool;
14590481622SDavid Gibson }
14690481622SDavid Gibson 
14790481622SDavid Gibson void hugepage_put_subpool(struct hugepage_subpool *spool)
14890481622SDavid Gibson {
149db71ef79SMike Kravetz 	unsigned long flags;
150db71ef79SMike Kravetz 
151db71ef79SMike Kravetz 	spin_lock_irqsave(&spool->lock, flags);
15290481622SDavid Gibson 	BUG_ON(!spool->count);
15390481622SDavid Gibson 	spool->count--;
154db71ef79SMike Kravetz 	unlock_or_release_subpool(spool, flags);
15590481622SDavid Gibson }
15690481622SDavid Gibson 
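/*
 * Illustrative sketch (not part of the original source): a hugetlbfs-style
 * user of the subpool API above.  The helper names and the sizes used here
 * are hypothetical; only hugepage_new_subpool() and hugepage_put_subpool()
 * are real.
 */
#if 0
static struct hugepage_subpool *example_mount_subpool(struct hstate *h)
{
	/*
	 * A mount limited to 64 huge pages with 8 reserved up front.  The
	 * minimum of 8 pages is charged to the global reserve immediately;
	 * NULL is returned if that charge (or the allocation) fails.
	 */
	return hugepage_new_subpool(h, 64, 8);
}

static void example_unmount_subpool(struct hugepage_subpool *spool)
{
	/* Drop the mount's reference; the subpool is freed on the last put. */
	hugepage_put_subpool(spool);
}
#endif
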
1571c5ecae3SMike Kravetz /*
1581c5ecae3SMike Kravetz  * Subpool accounting for allocating and reserving pages.
1591c5ecae3SMike Kravetz  * Return -ENOMEM if there are not enough resources to satisfy the
1609e7ee400SRandy Dunlap  * request.  Otherwise, return the number of pages by which the
1611c5ecae3SMike Kravetz  * global pools must be adjusted (upward).  The returned value may
1621c5ecae3SMike Kravetz  * only be different than the passed value (delta) in the case where
1637c8de358SEthon Paul  * a subpool minimum size must be maintained.
1641c5ecae3SMike Kravetz  */
1651c5ecae3SMike Kravetz static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
16690481622SDavid Gibson 				      long delta)
16790481622SDavid Gibson {
1681c5ecae3SMike Kravetz 	long ret = delta;
16990481622SDavid Gibson 
17090481622SDavid Gibson 	if (!spool)
1711c5ecae3SMike Kravetz 		return ret;
17290481622SDavid Gibson 
173db71ef79SMike Kravetz 	spin_lock_irq(&spool->lock);
17490481622SDavid Gibson 
1751c5ecae3SMike Kravetz 	if (spool->max_hpages != -1) {		/* maximum size accounting */
1761c5ecae3SMike Kravetz 		if ((spool->used_hpages + delta) <= spool->max_hpages)
1771c5ecae3SMike Kravetz 			spool->used_hpages += delta;
1781c5ecae3SMike Kravetz 		else {
1791c5ecae3SMike Kravetz 			ret = -ENOMEM;
1801c5ecae3SMike Kravetz 			goto unlock_ret;
1811c5ecae3SMike Kravetz 		}
1821c5ecae3SMike Kravetz 	}
1831c5ecae3SMike Kravetz 
18409a95e29SMike Kravetz 	/* minimum size accounting */
18509a95e29SMike Kravetz 	if (spool->min_hpages != -1 && spool->rsv_hpages) {
1861c5ecae3SMike Kravetz 		if (delta > spool->rsv_hpages) {
1871c5ecae3SMike Kravetz 			/*
1881c5ecae3SMike Kravetz 			 * Asking for more reserves than those already taken on
1891c5ecae3SMike Kravetz 			 * behalf of subpool.  Return difference.
1901c5ecae3SMike Kravetz 			 */
1911c5ecae3SMike Kravetz 			ret = delta - spool->rsv_hpages;
1921c5ecae3SMike Kravetz 			spool->rsv_hpages = 0;
1931c5ecae3SMike Kravetz 		} else {
1941c5ecae3SMike Kravetz 			ret = 0;	/* reserves already accounted for */
1951c5ecae3SMike Kravetz 			spool->rsv_hpages -= delta;
1961c5ecae3SMike Kravetz 		}
1971c5ecae3SMike Kravetz 	}
1981c5ecae3SMike Kravetz 
1991c5ecae3SMike Kravetz unlock_ret:
200db71ef79SMike Kravetz 	spin_unlock_irq(&spool->lock);
20190481622SDavid Gibson 	return ret;
20290481622SDavid Gibson }
20390481622SDavid Gibson 
2041c5ecae3SMike Kravetz /*
2051c5ecae3SMike Kravetz  * Subpool accounting for freeing and unreserving pages.
2061c5ecae3SMike Kravetz  * Return the number of global page reservations that must be dropped.
2071c5ecae3SMike Kravetz  * The return value may only be different than the passed value (delta)
2081c5ecae3SMike Kravetz  * in the case where a subpool minimum size must be maintained.
2091c5ecae3SMike Kravetz  */
2101c5ecae3SMike Kravetz static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
21190481622SDavid Gibson 				       long delta)
21290481622SDavid Gibson {
2131c5ecae3SMike Kravetz 	long ret = delta;
214db71ef79SMike Kravetz 	unsigned long flags;
2151c5ecae3SMike Kravetz 
21690481622SDavid Gibson 	if (!spool)
2171c5ecae3SMike Kravetz 		return delta;
21890481622SDavid Gibson 
219db71ef79SMike Kravetz 	spin_lock_irqsave(&spool->lock, flags);
2201c5ecae3SMike Kravetz 
2211c5ecae3SMike Kravetz 	if (spool->max_hpages != -1)		/* maximum size accounting */
22290481622SDavid Gibson 		spool->used_hpages -= delta;
2231c5ecae3SMike Kravetz 
22409a95e29SMike Kravetz 	 /* minimum size accounting */
22509a95e29SMike Kravetz 	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
2261c5ecae3SMike Kravetz 		if (spool->rsv_hpages + delta <= spool->min_hpages)
2271c5ecae3SMike Kravetz 			ret = 0;
2281c5ecae3SMike Kravetz 		else
2291c5ecae3SMike Kravetz 			ret = spool->rsv_hpages + delta - spool->min_hpages;
2301c5ecae3SMike Kravetz 
2311c5ecae3SMike Kravetz 		spool->rsv_hpages += delta;
2321c5ecae3SMike Kravetz 		if (spool->rsv_hpages > spool->min_hpages)
2331c5ecae3SMike Kravetz 			spool->rsv_hpages = spool->min_hpages;
2341c5ecae3SMike Kravetz 	}
2351c5ecae3SMike Kravetz 
2361c5ecae3SMike Kravetz 	/*
2371c5ecae3SMike Kravetz 	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
2381c5ecae3SMike Kravetz 	 * quota reference, free it now.
2391c5ecae3SMike Kravetz 	 */
240db71ef79SMike Kravetz 	unlock_or_release_subpool(spool, flags);
2411c5ecae3SMike Kravetz 
2421c5ecae3SMike Kravetz 	return ret;
24390481622SDavid Gibson }
24490481622SDavid Gibson 
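/*
 * Worked example (illustrative, not from the original source): consider a
 * subpool created with max_hpages = 64 and min_hpages = 8, so rsv_hpages
 * starts at 8 and eight pages are already charged to the global pool:
 *
 *   hugepage_subpool_get_pages(spool, 3)  returns 0   (used = 3,  rsv = 5)
 *	all three pages are covered by the minimum reserve
 *   hugepage_subpool_get_pages(spool, 10) returns 5   (used = 13, rsv = 0)
 *	only five of the ten pages still need global accounting
 *   hugepage_subpool_put_pages(spool, 10) returns 2   (used = 3,  rsv = 8)
 *	eight of the returned pages refill the minimum reserve
 *   hugepage_subpool_put_pages(spool, 3)  returns 3   (used = 0,  rsv = 8)
 *	the reserve is already full, so all three go back to the global pool
 */
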
24590481622SDavid Gibson static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
24690481622SDavid Gibson {
24790481622SDavid Gibson 	return HUGETLBFS_SB(inode->i_sb)->spool;
24890481622SDavid Gibson }
24990481622SDavid Gibson 
25090481622SDavid Gibson static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
25190481622SDavid Gibson {
252496ad9aaSAl Viro 	return subpool_inode(file_inode(vma->vm_file));
25390481622SDavid Gibson }
25490481622SDavid Gibson 
2550db9d74eSMina Almasry /* Helper that removes a struct file_region from the resv_map cache and returns
2560db9d74eSMina Almasry  * it for use.
2570db9d74eSMina Almasry  */
2580db9d74eSMina Almasry static struct file_region *
2590db9d74eSMina Almasry get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
2600db9d74eSMina Almasry {
2613259914fSXU pengfei 	struct file_region *nrg;
2620db9d74eSMina Almasry 
2630db9d74eSMina Almasry 	VM_BUG_ON(resv->region_cache_count <= 0);
2640db9d74eSMina Almasry 
2650db9d74eSMina Almasry 	resv->region_cache_count--;
2660db9d74eSMina Almasry 	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
2670db9d74eSMina Almasry 	list_del(&nrg->link);
2680db9d74eSMina Almasry 
2690db9d74eSMina Almasry 	nrg->from = from;
2700db9d74eSMina Almasry 	nrg->to = to;
2710db9d74eSMina Almasry 
2720db9d74eSMina Almasry 	return nrg;
2730db9d74eSMina Almasry }
2740db9d74eSMina Almasry 
275075a61d0SMina Almasry static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
276075a61d0SMina Almasry 					      struct file_region *rg)
277075a61d0SMina Almasry {
278075a61d0SMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
279075a61d0SMina Almasry 	nrg->reservation_counter = rg->reservation_counter;
280075a61d0SMina Almasry 	nrg->css = rg->css;
281075a61d0SMina Almasry 	if (rg->css)
282075a61d0SMina Almasry 		css_get(rg->css);
283075a61d0SMina Almasry #endif
284075a61d0SMina Almasry }
285075a61d0SMina Almasry 
286075a61d0SMina Almasry /* Helper that records hugetlb_cgroup uncharge info. */
287075a61d0SMina Almasry static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
288075a61d0SMina Almasry 						struct hstate *h,
289075a61d0SMina Almasry 						struct resv_map *resv,
290075a61d0SMina Almasry 						struct file_region *nrg)
291075a61d0SMina Almasry {
292075a61d0SMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
293075a61d0SMina Almasry 	if (h_cg) {
294075a61d0SMina Almasry 		nrg->reservation_counter =
295075a61d0SMina Almasry 			&h_cg->rsvd_hugepage[hstate_index(h)];
296075a61d0SMina Almasry 		nrg->css = &h_cg->css;
297d85aecf2SMiaohe Lin 		/*
298d85aecf2SMiaohe Lin 		 * The caller will hold exactly one h_cg->css reference for the
299d85aecf2SMiaohe Lin 		 * whole contiguous reservation region. But this area might be
300d85aecf2SMiaohe Lin 		 * scattered when some file_regions already reside in it. As a
301d85aecf2SMiaohe Lin 		 * result, many file_regions may share only one css
302d85aecf2SMiaohe Lin 		 * reference. In order to ensure that one file_region must hold
303d85aecf2SMiaohe Lin 		 * exactly one h_cg->css reference, we should do css_get for
304d85aecf2SMiaohe Lin 		 * each file_region and leave the reference held by caller
305d85aecf2SMiaohe Lin 		 * untouched.
306d85aecf2SMiaohe Lin 		 */
307d85aecf2SMiaohe Lin 		css_get(&h_cg->css);
308075a61d0SMina Almasry 		if (!resv->pages_per_hpage)
309075a61d0SMina Almasry 			resv->pages_per_hpage = pages_per_huge_page(h);
310075a61d0SMina Almasry 		/* pages_per_hpage should be the same for all entries in
311075a61d0SMina Almasry 		 * a resv_map.
312075a61d0SMina Almasry 		 */
313075a61d0SMina Almasry 		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
314075a61d0SMina Almasry 	} else {
315075a61d0SMina Almasry 		nrg->reservation_counter = NULL;
316075a61d0SMina Almasry 		nrg->css = NULL;
317075a61d0SMina Almasry 	}
318075a61d0SMina Almasry #endif
319075a61d0SMina Almasry }
320075a61d0SMina Almasry 
321d85aecf2SMiaohe Lin static void put_uncharge_info(struct file_region *rg)
322d85aecf2SMiaohe Lin {
323d85aecf2SMiaohe Lin #ifdef CONFIG_CGROUP_HUGETLB
324d85aecf2SMiaohe Lin 	if (rg->css)
325d85aecf2SMiaohe Lin 		css_put(rg->css);
326d85aecf2SMiaohe Lin #endif
327d85aecf2SMiaohe Lin }
328d85aecf2SMiaohe Lin 
329a9b3f867SMina Almasry static bool has_same_uncharge_info(struct file_region *rg,
330a9b3f867SMina Almasry 				   struct file_region *org)
331a9b3f867SMina Almasry {
332a9b3f867SMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
3330739eb43SBaolin Wang 	return rg->reservation_counter == org->reservation_counter &&
334a9b3f867SMina Almasry 	       rg->css == org->css;
335a9b3f867SMina Almasry 
336a9b3f867SMina Almasry #else
337a9b3f867SMina Almasry 	return true;
338a9b3f867SMina Almasry #endif
339a9b3f867SMina Almasry }
340a9b3f867SMina Almasry 
341a9b3f867SMina Almasry static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
342a9b3f867SMina Almasry {
3433259914fSXU pengfei 	struct file_region *nrg, *prg;
344a9b3f867SMina Almasry 
345a9b3f867SMina Almasry 	prg = list_prev_entry(rg, link);
346a9b3f867SMina Almasry 	if (&prg->link != &resv->regions && prg->to == rg->from &&
347a9b3f867SMina Almasry 	    has_same_uncharge_info(prg, rg)) {
348a9b3f867SMina Almasry 		prg->to = rg->to;
349a9b3f867SMina Almasry 
350a9b3f867SMina Almasry 		list_del(&rg->link);
351d85aecf2SMiaohe Lin 		put_uncharge_info(rg);
352a9b3f867SMina Almasry 		kfree(rg);
353a9b3f867SMina Almasry 
3547db5e7b6SWei Yang 		rg = prg;
355a9b3f867SMina Almasry 	}
356a9b3f867SMina Almasry 
357a9b3f867SMina Almasry 	nrg = list_next_entry(rg, link);
358a9b3f867SMina Almasry 	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
359a9b3f867SMina Almasry 	    has_same_uncharge_info(nrg, rg)) {
360a9b3f867SMina Almasry 		nrg->from = rg->from;
361a9b3f867SMina Almasry 
362a9b3f867SMina Almasry 		list_del(&rg->link);
363d85aecf2SMiaohe Lin 		put_uncharge_info(rg);
364a9b3f867SMina Almasry 		kfree(rg);
365a9b3f867SMina Almasry 	}
366a9b3f867SMina Almasry }
367a9b3f867SMina Almasry 
3682103cf9cSPeter Xu static inline long
36984448c8eSJakob Koschel hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
3702103cf9cSPeter Xu 		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
3712103cf9cSPeter Xu 		     long *regions_needed)
3722103cf9cSPeter Xu {
3732103cf9cSPeter Xu 	struct file_region *nrg;
3742103cf9cSPeter Xu 
3752103cf9cSPeter Xu 	if (!regions_needed) {
3762103cf9cSPeter Xu 		nrg = get_file_region_entry_from_cache(map, from, to);
3772103cf9cSPeter Xu 		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
37884448c8eSJakob Koschel 		list_add(&nrg->link, rg);
3792103cf9cSPeter Xu 		coalesce_file_region(map, nrg);
3802103cf9cSPeter Xu 	} else
3812103cf9cSPeter Xu 		*regions_needed += 1;
3822103cf9cSPeter Xu 
3832103cf9cSPeter Xu 	return to - from;
3842103cf9cSPeter Xu }
3852103cf9cSPeter Xu 
386972a3da3SWei Yang /*
387972a3da3SWei Yang  * Must be called with resv->lock held.
388972a3da3SWei Yang  *
389972a3da3SWei Yang  * Calling this with regions_needed != NULL will count the number of pages
390972a3da3SWei Yang  * to be added but will not modify the linked list; regions_needed will
391972a3da3SWei Yang  * indicate the number of file_regions needed in the cache to carry out
392972a3da3SWei Yang  * the addition of the regions for this range.
393d75c6af9SMina Almasry  */
394d75c6af9SMina Almasry static long add_reservation_in_range(struct resv_map *resv, long f, long t,
395075a61d0SMina Almasry 				     struct hugetlb_cgroup *h_cg,
396972a3da3SWei Yang 				     struct hstate *h, long *regions_needed)
397d75c6af9SMina Almasry {
3980db9d74eSMina Almasry 	long add = 0;
399d75c6af9SMina Almasry 	struct list_head *head = &resv->regions;
4000db9d74eSMina Almasry 	long last_accounted_offset = f;
40184448c8eSJakob Koschel 	struct file_region *iter, *trg = NULL;
40284448c8eSJakob Koschel 	struct list_head *rg = NULL;
403d75c6af9SMina Almasry 
4040db9d74eSMina Almasry 	if (regions_needed)
4050db9d74eSMina Almasry 		*regions_needed = 0;
406d75c6af9SMina Almasry 
4070db9d74eSMina Almasry 	/* In this loop, we essentially handle an entry for the range
40884448c8eSJakob Koschel 	 * [last_accounted_offset, iter->from), at every iteration, with some
4090db9d74eSMina Almasry 	 * bounds checking.
4100db9d74eSMina Almasry 	 */
41184448c8eSJakob Koschel 	list_for_each_entry_safe(iter, trg, head, link) {
4120db9d74eSMina Almasry 		/* Skip irrelevant regions that start before our range. */
41384448c8eSJakob Koschel 		if (iter->from < f) {
4140db9d74eSMina Almasry 			/* If this region ends after the last accounted offset,
4150db9d74eSMina Almasry 			 * then we need to update last_accounted_offset.
4160db9d74eSMina Almasry 			 */
41784448c8eSJakob Koschel 			if (iter->to > last_accounted_offset)
41884448c8eSJakob Koschel 				last_accounted_offset = iter->to;
4190db9d74eSMina Almasry 			continue;
4200db9d74eSMina Almasry 		}
421d75c6af9SMina Almasry 
4220db9d74eSMina Almasry 		/* When we find a region that starts beyond our range, we've
4230db9d74eSMina Almasry 		 * finished.
4240db9d74eSMina Almasry 		 */
42584448c8eSJakob Koschel 		if (iter->from >= t) {
42684448c8eSJakob Koschel 			rg = iter->link.prev;
427d75c6af9SMina Almasry 			break;
42884448c8eSJakob Koschel 		}
429d75c6af9SMina Almasry 
43084448c8eSJakob Koschel 		/* Add an entry for last_accounted_offset -> iter->from, and
4310db9d74eSMina Almasry 		 * update last_accounted_offset.
432d75c6af9SMina Almasry 		 */
43384448c8eSJakob Koschel 		if (iter->from > last_accounted_offset)
43484448c8eSJakob Koschel 			add += hugetlb_resv_map_add(resv, iter->link.prev,
4352103cf9cSPeter Xu 						    last_accounted_offset,
43684448c8eSJakob Koschel 						    iter->from, h, h_cg,
4372103cf9cSPeter Xu 						    regions_needed);
438d75c6af9SMina Almasry 
43984448c8eSJakob Koschel 		last_accounted_offset = iter->to;
4400db9d74eSMina Almasry 	}
4410db9d74eSMina Almasry 
4420db9d74eSMina Almasry 	/* Handle the case where our range extends beyond
4430db9d74eSMina Almasry 	 * last_accounted_offset.
4440db9d74eSMina Almasry 	 */
44584448c8eSJakob Koschel 	if (!rg)
44684448c8eSJakob Koschel 		rg = head->prev;
4472103cf9cSPeter Xu 	if (last_accounted_offset < t)
4482103cf9cSPeter Xu 		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
4492103cf9cSPeter Xu 					    t, h, h_cg, regions_needed);
4500db9d74eSMina Almasry 
4510db9d74eSMina Almasry 	return add;
4520db9d74eSMina Almasry }
4530db9d74eSMina Almasry 
4540db9d74eSMina Almasry /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
4550db9d74eSMina Almasry  */
4560db9d74eSMina Almasry static int allocate_file_region_entries(struct resv_map *resv,
4570db9d74eSMina Almasry 					int regions_needed)
4580db9d74eSMina Almasry 	__must_hold(&resv->lock)
4590db9d74eSMina Almasry {
46034665341SMiaohe Lin 	LIST_HEAD(allocated_regions);
4610db9d74eSMina Almasry 	int to_allocate = 0, i = 0;
4620db9d74eSMina Almasry 	struct file_region *trg = NULL, *rg = NULL;
4630db9d74eSMina Almasry 
4640db9d74eSMina Almasry 	VM_BUG_ON(regions_needed < 0);
4650db9d74eSMina Almasry 
4660db9d74eSMina Almasry 	/*
4670db9d74eSMina Almasry 	 * Check for sufficient descriptors in the cache to accommodate
4680db9d74eSMina Almasry 	 * the number of in progress add operations plus regions_needed.
4690db9d74eSMina Almasry 	 *
4700db9d74eSMina Almasry 	 * This is a while loop because when we drop the lock, some other call
4710db9d74eSMina Almasry 	 * to region_add or region_del may have consumed some region_entries,
4720db9d74eSMina Almasry 	 * so we keep looping here until we finally have enough entries for
4730db9d74eSMina Almasry 	 * (adds_in_progress + regions_needed).
4740db9d74eSMina Almasry 	 */
4750db9d74eSMina Almasry 	while (resv->region_cache_count <
4760db9d74eSMina Almasry 	       (resv->adds_in_progress + regions_needed)) {
4770db9d74eSMina Almasry 		to_allocate = resv->adds_in_progress + regions_needed -
4780db9d74eSMina Almasry 			      resv->region_cache_count;
4790db9d74eSMina Almasry 
4800db9d74eSMina Almasry 		/* At this point, we should have enough entries in the cache
481f0953a1bSIngo Molnar 		 * for all the existing adds_in_progress. We should only be
4820db9d74eSMina Almasry 		 * needing to allocate for regions_needed.
4830db9d74eSMina Almasry 		 */
4840db9d74eSMina Almasry 		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
4850db9d74eSMina Almasry 
4860db9d74eSMina Almasry 		spin_unlock(&resv->lock);
4870db9d74eSMina Almasry 		for (i = 0; i < to_allocate; i++) {
4880db9d74eSMina Almasry 			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
4890db9d74eSMina Almasry 			if (!trg)
4900db9d74eSMina Almasry 				goto out_of_memory;
4910db9d74eSMina Almasry 			list_add(&trg->link, &allocated_regions);
4920db9d74eSMina Almasry 		}
4930db9d74eSMina Almasry 
4940db9d74eSMina Almasry 		spin_lock(&resv->lock);
4950db9d74eSMina Almasry 
496d3ec7b6eSWei Yang 		list_splice(&allocated_regions, &resv->region_cache);
497d3ec7b6eSWei Yang 		resv->region_cache_count += to_allocate;
4980db9d74eSMina Almasry 	}
4990db9d74eSMina Almasry 
5000db9d74eSMina Almasry 	return 0;
5010db9d74eSMina Almasry 
5020db9d74eSMina Almasry out_of_memory:
5030db9d74eSMina Almasry 	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
504d75c6af9SMina Almasry 		list_del(&rg->link);
505d75c6af9SMina Almasry 		kfree(rg);
506d75c6af9SMina Almasry 	}
5070db9d74eSMina Almasry 	return -ENOMEM;
508d75c6af9SMina Almasry }
509d75c6af9SMina Almasry 
5101dd308a7SMike Kravetz /*
5111dd308a7SMike Kravetz  * Add the huge page range represented by [f, t) to the reserve
5120db9d74eSMina Almasry  * map.  Regions will be taken from the cache to fill in this range.
5130db9d74eSMina Almasry  * Sufficient regions should exist in the cache due to the previous
5140db9d74eSMina Almasry  * call to region_chg with the same range, but in some cases the cache will not
5150db9d74eSMina Almasry  * have sufficient entries due to races with other code doing region_add or
5160db9d74eSMina Almasry  * region_del.  The extra needed entries will be allocated.
517cf3ad20bSMike Kravetz  *
5180db9d74eSMina Almasry  * regions_needed is the out value provided by a previous call to region_chg.
5190db9d74eSMina Almasry  *
5200db9d74eSMina Almasry  * Return the number of new huge pages added to the map.  This number is greater
5210db9d74eSMina Almasry  * than or equal to zero.  If file_region entries needed to be allocated for
5227c8de358SEthon Paul  * this operation and we were not able to allocate, it returns -ENOMEM.
5230db9d74eSMina Almasry  * region_add of regions of length 1 never allocates file_regions and cannot
5240db9d74eSMina Almasry  * fail; region_chg will always allocate at least 1 entry and a region_add for
5250db9d74eSMina Almasry  * 1 page will only require at most 1 entry.
5261dd308a7SMike Kravetz  */
5270db9d74eSMina Almasry static long region_add(struct resv_map *resv, long f, long t,
528075a61d0SMina Almasry 		       long in_regions_needed, struct hstate *h,
529075a61d0SMina Almasry 		       struct hugetlb_cgroup *h_cg)
53096822904SAndy Whitcroft {
5310db9d74eSMina Almasry 	long add = 0, actual_regions_needed = 0;
53296822904SAndy Whitcroft 
5337b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
5340db9d74eSMina Almasry retry:
5350db9d74eSMina Almasry 
5360db9d74eSMina Almasry 	/* Count how many regions are actually needed to execute this add. */
537972a3da3SWei Yang 	add_reservation_in_range(resv, f, t, NULL, NULL,
538972a3da3SWei Yang 				 &actual_regions_needed);
53996822904SAndy Whitcroft 
5405e911373SMike Kravetz 	/*
5410db9d74eSMina Almasry 	 * Check for sufficient descriptors in the cache to accommodate
5420db9d74eSMina Almasry 	 * this add operation. Note that actual_regions_needed may be greater
5430db9d74eSMina Almasry 	 * than in_regions_needed, as the resv_map may have been modified since
5440db9d74eSMina Almasry 	 * the region_chg call. In this case, we need to make sure that we
5450db9d74eSMina Almasry 	 * allocate extra entries, such that we have enough for all the
5460db9d74eSMina Almasry 	 * existing adds_in_progress, plus the excess needed for this
5470db9d74eSMina Almasry 	 * operation.
5485e911373SMike Kravetz 	 */
5490db9d74eSMina Almasry 	if (actual_regions_needed > in_regions_needed &&
5500db9d74eSMina Almasry 	    resv->region_cache_count <
5510db9d74eSMina Almasry 		    resv->adds_in_progress +
5520db9d74eSMina Almasry 			    (actual_regions_needed - in_regions_needed)) {
5530db9d74eSMina Almasry 		/* region_add operation of range 1 should never need to
5540db9d74eSMina Almasry 		 * allocate file_region entries.
5550db9d74eSMina Almasry 		 */
5560db9d74eSMina Almasry 		VM_BUG_ON(t - f <= 1);
5575e911373SMike Kravetz 
5580db9d74eSMina Almasry 		if (allocate_file_region_entries(
5590db9d74eSMina Almasry 			    resv, actual_regions_needed - in_regions_needed)) {
5600db9d74eSMina Almasry 			return -ENOMEM;
5615e911373SMike Kravetz 		}
5625e911373SMike Kravetz 
5630db9d74eSMina Almasry 		goto retry;
5640db9d74eSMina Almasry 	}
565cf3ad20bSMike Kravetz 
566972a3da3SWei Yang 	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
5670db9d74eSMina Almasry 
5680db9d74eSMina Almasry 	resv->adds_in_progress -= in_regions_needed;
5690db9d74eSMina Almasry 
5707b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
571cf3ad20bSMike Kravetz 	return add;
57296822904SAndy Whitcroft }
57396822904SAndy Whitcroft 
5741dd308a7SMike Kravetz /*
5751dd308a7SMike Kravetz  * Examine the existing reserve map and determine how many
5761dd308a7SMike Kravetz  * huge pages in the specified range [f, t) are NOT currently
5771dd308a7SMike Kravetz  * represented.  This routine is called before a subsequent
5781dd308a7SMike Kravetz  * call to region_add that will actually modify the reserve
5791dd308a7SMike Kravetz  * map to add the specified range [f, t).  region_chg does
5801dd308a7SMike Kravetz  * not change the number of huge pages represented by the
5810db9d74eSMina Almasry  * map.  A number of new file_region structures are added to the cache as
5820db9d74eSMina Almasry  * placeholders for the subsequent region_add call to use. At least 1
5830db9d74eSMina Almasry  * file_region structure is added.
5840db9d74eSMina Almasry  *
5850db9d74eSMina Almasry  * out_regions_needed is the number of regions added to the
5860db9d74eSMina Almasry  * resv->adds_in_progress.  This value needs to be provided to a follow up call
5870db9d74eSMina Almasry  * to region_add or region_abort for proper accounting.
5885e911373SMike Kravetz  *
5895e911373SMike Kravetz  * Returns the number of huge pages that need to be added to the existing
5905e911373SMike Kravetz  * reservation map for the range [f, t).  This number is greater or equal to
5915e911373SMike Kravetz  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
5925e911373SMike Kravetz  * is needed and cannot be allocated.
5931dd308a7SMike Kravetz  */
5940db9d74eSMina Almasry static long region_chg(struct resv_map *resv, long f, long t,
5950db9d74eSMina Almasry 		       long *out_regions_needed)
59696822904SAndy Whitcroft {
59796822904SAndy Whitcroft 	long chg = 0;
59896822904SAndy Whitcroft 
5997b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
6005e911373SMike Kravetz 
601972a3da3SWei Yang 	/* Count how many hugepages in this range are NOT represented. */
602075a61d0SMina Almasry 	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
603972a3da3SWei Yang 				       out_regions_needed);
6045e911373SMike Kravetz 
6050db9d74eSMina Almasry 	if (*out_regions_needed == 0)
6060db9d74eSMina Almasry 		*out_regions_needed = 1;
6075e911373SMike Kravetz 
6080db9d74eSMina Almasry 	if (allocate_file_region_entries(resv, *out_regions_needed))
6095e911373SMike Kravetz 		return -ENOMEM;
6105e911373SMike Kravetz 
6110db9d74eSMina Almasry 	resv->adds_in_progress += *out_regions_needed;
61296822904SAndy Whitcroft 
6137b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
61496822904SAndy Whitcroft 	return chg;
61596822904SAndy Whitcroft }
61696822904SAndy Whitcroft 
6171dd308a7SMike Kravetz /*
6185e911373SMike Kravetz  * Abort the in progress add operation.  The adds_in_progress field
6195e911373SMike Kravetz  * of the resv_map keeps track of the operations in progress between
6205e911373SMike Kravetz  * calls to region_chg and region_add.  Operations are sometimes
6215e911373SMike Kravetz  * aborted after the call to region_chg.  In such cases, region_abort
6220db9d74eSMina Almasry  * is called to decrement the adds_in_progress counter. regions_needed
6230db9d74eSMina Almasry  * is the value returned by the region_chg call; it is used to decrement
6240db9d74eSMina Almasry  * the adds_in_progress counter.
6255e911373SMike Kravetz  *
6265e911373SMike Kravetz  * NOTE: The range arguments [f, t) are not needed or used in this
6275e911373SMike Kravetz  * routine.  They are kept to make reading the calling code easier as
6285e911373SMike Kravetz  * arguments will match the associated region_chg call.
6295e911373SMike Kravetz  */
6300db9d74eSMina Almasry static void region_abort(struct resv_map *resv, long f, long t,
6310db9d74eSMina Almasry 			 long regions_needed)
6325e911373SMike Kravetz {
6335e911373SMike Kravetz 	spin_lock(&resv->lock);
6345e911373SMike Kravetz 	VM_BUG_ON(!resv->region_cache_count);
6350db9d74eSMina Almasry 	resv->adds_in_progress -= regions_needed;
6365e911373SMike Kravetz 	spin_unlock(&resv->lock);
6375e911373SMike Kravetz }
6385e911373SMike Kravetz 
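/*
 * Illustrative sketch (not part of the original source) of the
 * region_chg/region_add/region_abort protocol documented above.  The helper
 * name and the surrounding error handling are hypothetical; the three region
 * calls and their argument order are the real ones from this file.
 */
#if 0
static long example_reserve_range(struct resv_map *resv, struct hstate *h,
				  long f, long t)
{
	long regions_needed, chg;

	/*
	 * Step 1: count how many pages in [f, t) are not yet reserved and
	 * pre-allocate enough file_region cache entries for the later add.
	 */
	chg = region_chg(resv, f, t, &regions_needed);
	if (chg < 0)
		return chg;

	/*
	 * Step 2: the caller would charge the subpool/cgroups for 'chg'
	 * pages here.  If that fails, the bookkeeping from step 1 must be
	 * undone with:
	 *
	 *	region_abort(resv, f, t, regions_needed);
	 */

	/* Step 3: commit the range into the reserve map. */
	return region_add(resv, f, t, regions_needed, h, NULL);
}
#endif
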
6395e911373SMike Kravetz /*
640feba16e2SMike Kravetz  * Delete the specified range [f, t) from the reserve map.  If the
641feba16e2SMike Kravetz  * t parameter is LONG_MAX, this indicates that ALL regions after f
642feba16e2SMike Kravetz  * should be deleted.  Locate the regions which intersect [f, t)
643feba16e2SMike Kravetz  * and either trim, delete or split the existing regions.
644feba16e2SMike Kravetz  *
645feba16e2SMike Kravetz  * Returns the number of huge pages deleted from the reserve map.
646feba16e2SMike Kravetz  * In the normal case, the return value is zero or more.  In the
647feba16e2SMike Kravetz  * case where a region must be split, a new region descriptor must
648feba16e2SMike Kravetz  * be allocated.  If the allocation fails, -ENOMEM will be returned.
649feba16e2SMike Kravetz  * NOTE: If the parameter t == LONG_MAX, then we will never split
650feba16e2SMike Kravetz  * a region and possibly return -ENOMEM.  Callers specifying
651feba16e2SMike Kravetz  * t == LONG_MAX do not need to check for -ENOMEM error.
6521dd308a7SMike Kravetz  */
653feba16e2SMike Kravetz static long region_del(struct resv_map *resv, long f, long t)
65496822904SAndy Whitcroft {
6551406ec9bSJoonsoo Kim 	struct list_head *head = &resv->regions;
65696822904SAndy Whitcroft 	struct file_region *rg, *trg;
657feba16e2SMike Kravetz 	struct file_region *nrg = NULL;
658feba16e2SMike Kravetz 	long del = 0;
65996822904SAndy Whitcroft 
660feba16e2SMike Kravetz retry:
6617b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
662feba16e2SMike Kravetz 	list_for_each_entry_safe(rg, trg, head, link) {
663dbe409e4SMike Kravetz 		/*
664dbe409e4SMike Kravetz 		 * Skip regions before the range to be deleted.  file_region
665dbe409e4SMike Kravetz 		 * ranges are normally of the form [from, to).  However, there
666dbe409e4SMike Kravetz 		 * may be a "placeholder" entry in the map which is of the form
667dbe409e4SMike Kravetz 		 * (from, to) with from == to.  Check for placeholder entries
668dbe409e4SMike Kravetz 		 * at the beginning of the range to be deleted.
669dbe409e4SMike Kravetz 		 */
670dbe409e4SMike Kravetz 		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
671feba16e2SMike Kravetz 			continue;
672dbe409e4SMike Kravetz 
673feba16e2SMike Kravetz 		if (rg->from >= t)
67496822904SAndy Whitcroft 			break;
67596822904SAndy Whitcroft 
676feba16e2SMike Kravetz 		if (f > rg->from && t < rg->to) { /* Must split region */
677feba16e2SMike Kravetz 			/*
678feba16e2SMike Kravetz 			 * Check for an entry in the cache before dropping
679feba16e2SMike Kravetz 			 * lock and attempting allocation.
680feba16e2SMike Kravetz 			 */
681feba16e2SMike Kravetz 			if (!nrg &&
682feba16e2SMike Kravetz 			    resv->region_cache_count > resv->adds_in_progress) {
683feba16e2SMike Kravetz 				nrg = list_first_entry(&resv->region_cache,
684feba16e2SMike Kravetz 							struct file_region,
685feba16e2SMike Kravetz 							link);
686feba16e2SMike Kravetz 				list_del(&nrg->link);
687feba16e2SMike Kravetz 				resv->region_cache_count--;
68896822904SAndy Whitcroft 			}
68996822904SAndy Whitcroft 
690feba16e2SMike Kravetz 			if (!nrg) {
691feba16e2SMike Kravetz 				spin_unlock(&resv->lock);
692feba16e2SMike Kravetz 				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
693feba16e2SMike Kravetz 				if (!nrg)
694feba16e2SMike Kravetz 					return -ENOMEM;
695feba16e2SMike Kravetz 				goto retry;
696feba16e2SMike Kravetz 			}
697feba16e2SMike Kravetz 
698feba16e2SMike Kravetz 			del += t - f;
69979aa925bSMike Kravetz 			hugetlb_cgroup_uncharge_file_region(
700d85aecf2SMiaohe Lin 				resv, rg, t - f, false);
701feba16e2SMike Kravetz 
702feba16e2SMike Kravetz 			/* New entry for end of split region */
703feba16e2SMike Kravetz 			nrg->from = t;
704feba16e2SMike Kravetz 			nrg->to = rg->to;
705075a61d0SMina Almasry 
706075a61d0SMina Almasry 			copy_hugetlb_cgroup_uncharge_info(nrg, rg);
707075a61d0SMina Almasry 
708feba16e2SMike Kravetz 			INIT_LIST_HEAD(&nrg->link);
709feba16e2SMike Kravetz 
710feba16e2SMike Kravetz 			/* Original entry is trimmed */
711feba16e2SMike Kravetz 			rg->to = f;
712feba16e2SMike Kravetz 
713feba16e2SMike Kravetz 			list_add(&nrg->link, &rg->link);
714feba16e2SMike Kravetz 			nrg = NULL;
71596822904SAndy Whitcroft 			break;
716feba16e2SMike Kravetz 		}
717feba16e2SMike Kravetz 
718feba16e2SMike Kravetz 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
719feba16e2SMike Kravetz 			del += rg->to - rg->from;
720075a61d0SMina Almasry 			hugetlb_cgroup_uncharge_file_region(resv, rg,
721d85aecf2SMiaohe Lin 							    rg->to - rg->from, true);
72296822904SAndy Whitcroft 			list_del(&rg->link);
72396822904SAndy Whitcroft 			kfree(rg);
724feba16e2SMike Kravetz 			continue;
72596822904SAndy Whitcroft 		}
7267b24d861SDavidlohr Bueso 
727feba16e2SMike Kravetz 		if (f <= rg->from) {	/* Trim beginning of region */
728075a61d0SMina Almasry 			hugetlb_cgroup_uncharge_file_region(resv, rg,
729d85aecf2SMiaohe Lin 							    t - rg->from, false);
730075a61d0SMina Almasry 
73179aa925bSMike Kravetz 			del += t - rg->from;
73279aa925bSMike Kravetz 			rg->from = t;
73379aa925bSMike Kravetz 		} else {		/* Trim end of region */
734075a61d0SMina Almasry 			hugetlb_cgroup_uncharge_file_region(resv, rg,
735d85aecf2SMiaohe Lin 							    rg->to - f, false);
73679aa925bSMike Kravetz 
73779aa925bSMike Kravetz 			del += rg->to - f;
73879aa925bSMike Kravetz 			rg->to = f;
739feba16e2SMike Kravetz 		}
740feba16e2SMike Kravetz 	}
741feba16e2SMike Kravetz 
7427b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
743feba16e2SMike Kravetz 	kfree(nrg);
744feba16e2SMike Kravetz 	return del;
74596822904SAndy Whitcroft }
74696822904SAndy Whitcroft 
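/*
 * Worked example (illustrative, not from the original source): if the reserve
 * map holds the single region [0, 10), then region_del(resv, 2, 5) splits it
 * into [0, 2) and [5, 10), consuming one entry from the region cache (or a
 * freshly allocated one), and returns 3.  region_del(resv, 0, LONG_MAX)
 * instead removes every region outright, so no split -- and hence no
 * allocation -- can be needed, which is why callers passing t == LONG_MAX
 * need not check for -ENOMEM.
 */
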
7471dd308a7SMike Kravetz /*
748b5cec28dSMike Kravetz  * A rare out-of-memory error was encountered which prevented removal of
749b5cec28dSMike Kravetz  * the reserve map region for a page.  The huge page itself was freed
750b5cec28dSMike Kravetz  * and removed from the page cache.  This routine will adjust the subpool
751b5cec28dSMike Kravetz  * usage count, and the global reserve count if needed.  By incrementing
752b5cec28dSMike Kravetz  * these counts, the reserve map entry which could not be deleted will
753b5cec28dSMike Kravetz  * appear as a "reserved" entry instead of simply dangling with incorrect
754b5cec28dSMike Kravetz  * counts.
755b5cec28dSMike Kravetz  */
75672e2936cSzhong jiang void hugetlb_fix_reserve_counts(struct inode *inode)
757b5cec28dSMike Kravetz {
758b5cec28dSMike Kravetz 	struct hugepage_subpool *spool = subpool_inode(inode);
759b5cec28dSMike Kravetz 	long rsv_adjust;
760da56388cSMiaohe Lin 	bool reserved = false;
761b5cec28dSMike Kravetz 
762b5cec28dSMike Kravetz 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
763da56388cSMiaohe Lin 	if (rsv_adjust > 0) {
764b5cec28dSMike Kravetz 		struct hstate *h = hstate_inode(inode);
765b5cec28dSMike Kravetz 
766da56388cSMiaohe Lin 		if (!hugetlb_acct_memory(h, 1))
767da56388cSMiaohe Lin 			reserved = true;
768da56388cSMiaohe Lin 	} else if (!rsv_adjust) {
769da56388cSMiaohe Lin 		reserved = true;
770b5cec28dSMike Kravetz 	}
771da56388cSMiaohe Lin 
772da56388cSMiaohe Lin 	if (!reserved)
773da56388cSMiaohe Lin 		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
774b5cec28dSMike Kravetz }
775b5cec28dSMike Kravetz 
776b5cec28dSMike Kravetz /*
7771dd308a7SMike Kravetz  * Count and return the number of huge pages in the reserve map
7781dd308a7SMike Kravetz  * that intersect with the range [f, t).
7791dd308a7SMike Kravetz  */
7801406ec9bSJoonsoo Kim static long region_count(struct resv_map *resv, long f, long t)
78184afd99bSAndy Whitcroft {
7821406ec9bSJoonsoo Kim 	struct list_head *head = &resv->regions;
78384afd99bSAndy Whitcroft 	struct file_region *rg;
78484afd99bSAndy Whitcroft 	long chg = 0;
78584afd99bSAndy Whitcroft 
7867b24d861SDavidlohr Bueso 	spin_lock(&resv->lock);
78784afd99bSAndy Whitcroft 	/* Locate each segment we overlap with, and count that overlap. */
78884afd99bSAndy Whitcroft 	list_for_each_entry(rg, head, link) {
789f2135a4aSWang Sheng-Hui 		long seg_from;
790f2135a4aSWang Sheng-Hui 		long seg_to;
79184afd99bSAndy Whitcroft 
79284afd99bSAndy Whitcroft 		if (rg->to <= f)
79384afd99bSAndy Whitcroft 			continue;
79484afd99bSAndy Whitcroft 		if (rg->from >= t)
79584afd99bSAndy Whitcroft 			break;
79684afd99bSAndy Whitcroft 
79784afd99bSAndy Whitcroft 		seg_from = max(rg->from, f);
79884afd99bSAndy Whitcroft 		seg_to = min(rg->to, t);
79984afd99bSAndy Whitcroft 
80084afd99bSAndy Whitcroft 		chg += seg_to - seg_from;
80184afd99bSAndy Whitcroft 	}
8027b24d861SDavidlohr Bueso 	spin_unlock(&resv->lock);
80384afd99bSAndy Whitcroft 
80484afd99bSAndy Whitcroft 	return chg;
80584afd99bSAndy Whitcroft }
80684afd99bSAndy Whitcroft 
80796822904SAndy Whitcroft /*
808e7c4b0bfSAndy Whitcroft  * Convert the address within this vma to the page offset within
809e7c4b0bfSAndy Whitcroft  * the mapping, in pagecache page units; huge pages here.
810e7c4b0bfSAndy Whitcroft  */
811a5516438SAndi Kleen static pgoff_t vma_hugecache_offset(struct hstate *h,
812a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long address)
813e7c4b0bfSAndy Whitcroft {
814a5516438SAndi Kleen 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
815a5516438SAndi Kleen 			(vma->vm_pgoff >> huge_page_order(h));
816e7c4b0bfSAndy Whitcroft }
817e7c4b0bfSAndy Whitcroft 
8180fe6e20bSNaoya Horiguchi pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
8190fe6e20bSNaoya Horiguchi 				     unsigned long address)
8200fe6e20bSNaoya Horiguchi {
8210fe6e20bSNaoya Horiguchi 	return vma_hugecache_offset(hstate_vma(vma), vma, address);
8220fe6e20bSNaoya Horiguchi }
823dee41079SDan Williams EXPORT_SYMBOL_GPL(linear_hugepage_index);
8240fe6e20bSNaoya Horiguchi 
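/*
 * Worked example (illustrative, with hypothetical values): for a 2 MB hstate
 * on 4 KB base pages, huge_page_shift(h) is 21 and huge_page_order(h) is 9.
 * With vma->vm_pgoff = 1024 (the mapping starts two huge pages into the
 * file), an address 6 MB past vma->vm_start maps to
 * ((6 MB) >> 21) + (1024 >> 9) = 3 + 2 = 5 huge-page units into the file.
 */
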
82584afd99bSAndy Whitcroft /*
82608fba699SMel Gorman  * Return the size of the pages allocated when backing a VMA. In the majority
82708fba699SMel Gorman  * of cases this will be the same size as used by the page table entries.
82808fba699SMel Gorman  */
82908fba699SMel Gorman unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
83008fba699SMel Gorman {
83105ea8860SDan Williams 	if (vma->vm_ops && vma->vm_ops->pagesize)
83205ea8860SDan Williams 		return vma->vm_ops->pagesize(vma);
83308fba699SMel Gorman 	return PAGE_SIZE;
83408fba699SMel Gorman }
835f340ca0fSJoerg Roedel EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
83608fba699SMel Gorman 
83708fba699SMel Gorman /*
8383340289dSMel Gorman  * Return the page size being used by the MMU to back a VMA. In the majority
8393340289dSMel Gorman  * of cases, the page size used by the kernel matches the MMU size. On
84009135cc5SDan Williams  * architectures where it differs, an architecture-specific 'strong'
84109135cc5SDan Williams  * version of this symbol is required.
8423340289dSMel Gorman  */
84309135cc5SDan Williams __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
8443340289dSMel Gorman {
8453340289dSMel Gorman 	return vma_kernel_pagesize(vma);
8463340289dSMel Gorman }
8473340289dSMel Gorman 
8483340289dSMel Gorman /*
84984afd99bSAndy Whitcroft  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
85084afd99bSAndy Whitcroft  * bits of the reservation map pointer, which are always clear due to
85184afd99bSAndy Whitcroft  * alignment.
85284afd99bSAndy Whitcroft  */
85384afd99bSAndy Whitcroft #define HPAGE_RESV_OWNER    (1UL << 0)
85484afd99bSAndy Whitcroft #define HPAGE_RESV_UNMAPPED (1UL << 1)
85504f2cbe3SMel Gorman #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
85684afd99bSAndy Whitcroft 
857a1e78772SMel Gorman /*
858a1e78772SMel Gorman  * These helpers are used to track how many pages are reserved for
859a1e78772SMel Gorman  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
860a1e78772SMel Gorman  * is guaranteed to have its future faults succeed.
861a1e78772SMel Gorman  *
862a1e78772SMel Gorman  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
863a1e78772SMel Gorman  * the reserve counters are updated with the hugetlb_lock held. It is safe
864a1e78772SMel Gorman  * to reset the VMA at fork() time as it is not in use yet and there is no
865a1e78772SMel Gorman  * chance of the global counters getting corrupted as a result of the values.
86684afd99bSAndy Whitcroft  *
86784afd99bSAndy Whitcroft  * The private mapping reservation is represented in a subtly different
86884afd99bSAndy Whitcroft  * manner from a shared mapping.  A shared mapping has a region map associated
86984afd99bSAndy Whitcroft  * with the underlying file; this region map represents the backing file
87084afd99bSAndy Whitcroft  * pages which have ever had a reservation assigned, and this persists even
87184afd99bSAndy Whitcroft  * after the page is instantiated.  A private mapping has a region map
87284afd99bSAndy Whitcroft  * associated with the original mmap which is attached to all VMAs which
87384afd99bSAndy Whitcroft  * reference it; this region map represents those offsets which have consumed
87484afd99bSAndy Whitcroft  * a reservation, i.e. where pages have been instantiated.
875a1e78772SMel Gorman  */
876e7c4b0bfSAndy Whitcroft static unsigned long get_vma_private_data(struct vm_area_struct *vma)
877e7c4b0bfSAndy Whitcroft {
878e7c4b0bfSAndy Whitcroft 	return (unsigned long)vma->vm_private_data;
879e7c4b0bfSAndy Whitcroft }
880e7c4b0bfSAndy Whitcroft 
881e7c4b0bfSAndy Whitcroft static void set_vma_private_data(struct vm_area_struct *vma,
882e7c4b0bfSAndy Whitcroft 							unsigned long value)
883e7c4b0bfSAndy Whitcroft {
884e7c4b0bfSAndy Whitcroft 	vma->vm_private_data = (void *)value;
885e7c4b0bfSAndy Whitcroft }
886e7c4b0bfSAndy Whitcroft 
887e9fe92aeSMina Almasry static void
888e9fe92aeSMina Almasry resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
889e9fe92aeSMina Almasry 					  struct hugetlb_cgroup *h_cg,
890e9fe92aeSMina Almasry 					  struct hstate *h)
891e9fe92aeSMina Almasry {
892e9fe92aeSMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
893e9fe92aeSMina Almasry 	if (!h_cg || !h) {
894e9fe92aeSMina Almasry 		resv_map->reservation_counter = NULL;
895e9fe92aeSMina Almasry 		resv_map->pages_per_hpage = 0;
896e9fe92aeSMina Almasry 		resv_map->css = NULL;
897e9fe92aeSMina Almasry 	} else {
898e9fe92aeSMina Almasry 		resv_map->reservation_counter =
899e9fe92aeSMina Almasry 			&h_cg->rsvd_hugepage[hstate_index(h)];
900e9fe92aeSMina Almasry 		resv_map->pages_per_hpage = pages_per_huge_page(h);
901e9fe92aeSMina Almasry 		resv_map->css = &h_cg->css;
902e9fe92aeSMina Almasry 	}
903e9fe92aeSMina Almasry #endif
904e9fe92aeSMina Almasry }
905e9fe92aeSMina Almasry 
9069119a41eSJoonsoo Kim struct resv_map *resv_map_alloc(void)
90784afd99bSAndy Whitcroft {
90884afd99bSAndy Whitcroft 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
9095e911373SMike Kravetz 	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
9105e911373SMike Kravetz 
9115e911373SMike Kravetz 	if (!resv_map || !rg) {
9125e911373SMike Kravetz 		kfree(resv_map);
9135e911373SMike Kravetz 		kfree(rg);
91484afd99bSAndy Whitcroft 		return NULL;
9155e911373SMike Kravetz 	}
91684afd99bSAndy Whitcroft 
91784afd99bSAndy Whitcroft 	kref_init(&resv_map->refs);
9187b24d861SDavidlohr Bueso 	spin_lock_init(&resv_map->lock);
91984afd99bSAndy Whitcroft 	INIT_LIST_HEAD(&resv_map->regions);
92084afd99bSAndy Whitcroft 
9215e911373SMike Kravetz 	resv_map->adds_in_progress = 0;
922e9fe92aeSMina Almasry 	/*
923e9fe92aeSMina Almasry 	 * Initialize these to 0. On shared mappings, 0's here indicate these
924e9fe92aeSMina Almasry 	 * fields don't do cgroup accounting. On private mappings, these will be
925e9fe92aeSMina Almasry 	 * re-initialized to the proper values, to indicate that hugetlb cgroup
926e9fe92aeSMina Almasry 	 * reservations are to be un-charged from here.
927e9fe92aeSMina Almasry 	 */
928e9fe92aeSMina Almasry 	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
9295e911373SMike Kravetz 
9305e911373SMike Kravetz 	INIT_LIST_HEAD(&resv_map->region_cache);
9315e911373SMike Kravetz 	list_add(&rg->link, &resv_map->region_cache);
9325e911373SMike Kravetz 	resv_map->region_cache_count = 1;
9335e911373SMike Kravetz 
93484afd99bSAndy Whitcroft 	return resv_map;
93584afd99bSAndy Whitcroft }
93684afd99bSAndy Whitcroft 
9379119a41eSJoonsoo Kim void resv_map_release(struct kref *ref)
93884afd99bSAndy Whitcroft {
93984afd99bSAndy Whitcroft 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
9405e911373SMike Kravetz 	struct list_head *head = &resv_map->region_cache;
9415e911373SMike Kravetz 	struct file_region *rg, *trg;
94284afd99bSAndy Whitcroft 
94384afd99bSAndy Whitcroft 	/* Clear out any active regions before we release the map. */
944feba16e2SMike Kravetz 	region_del(resv_map, 0, LONG_MAX);
9455e911373SMike Kravetz 
9465e911373SMike Kravetz 	/* ... and any entries left in the cache */
9475e911373SMike Kravetz 	list_for_each_entry_safe(rg, trg, head, link) {
9485e911373SMike Kravetz 		list_del(&rg->link);
9495e911373SMike Kravetz 		kfree(rg);
9505e911373SMike Kravetz 	}
9515e911373SMike Kravetz 
9525e911373SMike Kravetz 	VM_BUG_ON(resv_map->adds_in_progress);
9535e911373SMike Kravetz 
95484afd99bSAndy Whitcroft 	kfree(resv_map);
95584afd99bSAndy Whitcroft }
95684afd99bSAndy Whitcroft 
9574e35f483SJoonsoo Kim static inline struct resv_map *inode_resv_map(struct inode *inode)
9584e35f483SJoonsoo Kim {
959f27a5136SMike Kravetz 	/*
960f27a5136SMike Kravetz 	 * At inode evict time, i_mapping may not point to the original
961f27a5136SMike Kravetz 	 * address space within the inode.  This original address space
962f27a5136SMike Kravetz 	 * contains the pointer to the resv_map.  So, always use the
963f27a5136SMike Kravetz 	 * address space embedded within the inode.
964f27a5136SMike Kravetz 	 * The VERY common case is inode->mapping == &inode->i_data but,
965f27a5136SMike Kravetz 	 * this may not be true for device special inodes.
966f27a5136SMike Kravetz 	 */
967f27a5136SMike Kravetz 	return (struct resv_map *)(&inode->i_data)->private_data;
9684e35f483SJoonsoo Kim }
9694e35f483SJoonsoo Kim 
97084afd99bSAndy Whitcroft static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
971a1e78772SMel Gorman {
97281d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
9734e35f483SJoonsoo Kim 	if (vma->vm_flags & VM_MAYSHARE) {
9744e35f483SJoonsoo Kim 		struct address_space *mapping = vma->vm_file->f_mapping;
9754e35f483SJoonsoo Kim 		struct inode *inode = mapping->host;
9764e35f483SJoonsoo Kim 
9774e35f483SJoonsoo Kim 		return inode_resv_map(inode);
9784e35f483SJoonsoo Kim 
9794e35f483SJoonsoo Kim 	} else {
98084afd99bSAndy Whitcroft 		return (struct resv_map *)(get_vma_private_data(vma) &
98184afd99bSAndy Whitcroft 							~HPAGE_RESV_MASK);
9824e35f483SJoonsoo Kim 	}
983a1e78772SMel Gorman }
984a1e78772SMel Gorman 
98584afd99bSAndy Whitcroft static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
986a1e78772SMel Gorman {
98781d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
98881d1b09cSSasha Levin 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
989a1e78772SMel Gorman 
99084afd99bSAndy Whitcroft 	set_vma_private_data(vma, (get_vma_private_data(vma) &
99184afd99bSAndy Whitcroft 				HPAGE_RESV_MASK) | (unsigned long)map);
99204f2cbe3SMel Gorman }
99304f2cbe3SMel Gorman 
99404f2cbe3SMel Gorman static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
99504f2cbe3SMel Gorman {
99681d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
99781d1b09cSSasha Levin 	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
998e7c4b0bfSAndy Whitcroft 
999e7c4b0bfSAndy Whitcroft 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
100004f2cbe3SMel Gorman }
100104f2cbe3SMel Gorman 
100204f2cbe3SMel Gorman static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
100304f2cbe3SMel Gorman {
100481d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1005e7c4b0bfSAndy Whitcroft 
1006e7c4b0bfSAndy Whitcroft 	return (get_vma_private_data(vma) & flag) != 0;
1007a1e78772SMel Gorman }
1008a1e78772SMel Gorman 
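/*
 * Illustrative summary (not part of the original source): for a MAP_PRIVATE
 * mapping, vma->vm_private_data packs a resv_map pointer together with the
 * HPAGE_RESV_* flags.  Because the resv_map comes from kmalloc(), its address
 * is at least word aligned, leaving the bottom two bits free for the flags:
 *
 *	set_vma_resv_map(vma, map);			stores the pointer
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);	ORs in bit 0
 *	vma_resv_map(vma) == map			after masking the flags
 *	is_vma_resv_set(vma, HPAGE_RESV_OWNER)		is now non-zero
 */
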
100904f2cbe3SMel Gorman /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
1010a1e78772SMel Gorman void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
1011a1e78772SMel Gorman {
101281d1b09cSSasha Levin 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1013f83a275dSMel Gorman 	if (!(vma->vm_flags & VM_MAYSHARE))
1014a1e78772SMel Gorman 		vma->vm_private_data = (void *)0;
1015a1e78772SMel Gorman }
1016a1e78772SMel Gorman 
1017550a7d60SMina Almasry /*
1018550a7d60SMina Almasry  * Reset and decrement one ref on hugepage private reservation.
1019550a7d60SMina Almasry  * Called with mm->mmap_sem writer semaphore held.
1020550a7d60SMina Almasry  * This function should only be used by move_vma() and operates on a
1021550a7d60SMina Almasry  * same-sized vma. It should never come here with the last ref on the
1022550a7d60SMina Almasry  * reservation.
1023550a7d60SMina Almasry  */
1024550a7d60SMina Almasry void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
1025550a7d60SMina Almasry {
1026550a7d60SMina Almasry 	/*
1027550a7d60SMina Almasry 	 * Clear the old hugetlb private page reservation.
1028550a7d60SMina Almasry 	 * It has already been transferred to new_vma.
1029550a7d60SMina Almasry 	 *
1030550a7d60SMina Almasry 	 * During a mremap() operation of a hugetlb vma we call move_vma()
1031550a7d60SMina Almasry 	 * which copies vma into new_vma and unmaps vma. After the copy
1032550a7d60SMina Almasry 	 * operation both new_vma and vma share a reference to the resv_map
1033550a7d60SMina Almasry 	 * struct, and at that point vma is about to be unmapped. We don't
1034550a7d60SMina Almasry 	 * want to return the reservation to the pool at unmap of vma because
1035550a7d60SMina Almasry 	 * the reservation still lives on in new_vma, so simply decrement the
1036550a7d60SMina Almasry 	 * ref here and remove the resv_map reference from this vma.
1037550a7d60SMina Almasry 	 */
1038550a7d60SMina Almasry 	struct resv_map *reservations = vma_resv_map(vma);
1039550a7d60SMina Almasry 
1040afe041c2SBui Quang Minh 	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1041afe041c2SBui Quang Minh 		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
1042550a7d60SMina Almasry 		kref_put(&reservations->refs, resv_map_release);
1043afe041c2SBui Quang Minh 	}
1044550a7d60SMina Almasry 
1045550a7d60SMina Almasry 	reset_vma_resv_huge_pages(vma);
1046550a7d60SMina Almasry }
1047550a7d60SMina Almasry 
1048a1e78772SMel Gorman /* Returns true if the VMA has associated reserve pages */
1049559ec2f8SNicholas Krause static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
1050a1e78772SMel Gorman {
1051af0ed73eSJoonsoo Kim 	if (vma->vm_flags & VM_NORESERVE) {
1052af0ed73eSJoonsoo Kim 		/*
1053af0ed73eSJoonsoo Kim 		 * This address is already reserved by another process (chg == 0),
1054af0ed73eSJoonsoo Kim 		 * so we should decrement the reserved count. Without decrementing,
1055af0ed73eSJoonsoo Kim 		 * the reserve count would remain after releasing the inode, because
1056af0ed73eSJoonsoo Kim 		 * the allocated page goes into the page cache and is regarded as
1057af0ed73eSJoonsoo Kim 		 * coming from the reserved pool in the release step.  Currently, we
1058af0ed73eSJoonsoo Kim 		 * don't have any better way to deal with this situation
1059af0ed73eSJoonsoo Kim 		 * properly, so add a work-around here.
1060af0ed73eSJoonsoo Kim 		 */
1061af0ed73eSJoonsoo Kim 		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
1062559ec2f8SNicholas Krause 			return true;
1063af0ed73eSJoonsoo Kim 		else
1064559ec2f8SNicholas Krause 			return false;
1065af0ed73eSJoonsoo Kim 	}
1066a63884e9SJoonsoo Kim 
1067a63884e9SJoonsoo Kim 	/* Shared mappings always use reserves */
10681fb1b0e9SMike Kravetz 	if (vma->vm_flags & VM_MAYSHARE) {
10691fb1b0e9SMike Kravetz 		/*
10701fb1b0e9SMike Kravetz 		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
10711fb1b0e9SMike Kravetz 		 * be a region map for all pages.  The only situation where
10721fb1b0e9SMike Kravetz 		 * there is no region map is if a hole was punched via
10737c8de358SEthon Paul 		 * fallocate.  In this case, there really are no reserves to
10741fb1b0e9SMike Kravetz 		 * use.  This situation is indicated if chg != 0.
10751fb1b0e9SMike Kravetz 		 */
10761fb1b0e9SMike Kravetz 		if (chg)
10771fb1b0e9SMike Kravetz 			return false;
10781fb1b0e9SMike Kravetz 		else
1079559ec2f8SNicholas Krause 			return true;
10801fb1b0e9SMike Kravetz 	}
1081a63884e9SJoonsoo Kim 
1082a63884e9SJoonsoo Kim 	/*
1083a63884e9SJoonsoo Kim 	 * Only the process that called mmap() has reserves for
1084a63884e9SJoonsoo Kim 	 * private mappings.
1085a63884e9SJoonsoo Kim 	 */
108667961f9dSMike Kravetz 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
108767961f9dSMike Kravetz 		/*
108867961f9dSMike Kravetz 		 * Like the shared case above, a hole punch or truncate
108967961f9dSMike Kravetz 		 * could have been performed on the private mapping.
109067961f9dSMike Kravetz 		 * Examine the value of chg to determine if reserves
109167961f9dSMike Kravetz 		 * actually exist or were previously consumed.
109267961f9dSMike Kravetz 		 * Very Subtle - The value of chg comes from a previous
109367961f9dSMike Kravetz 		 * call to vma_needs_reserves().  The reserve map for
109467961f9dSMike Kravetz 		 * private mappings has different (opposite) semantics
109567961f9dSMike Kravetz 		 * than that of shared mappings.  vma_needs_reserves()
109667961f9dSMike Kravetz 		 * has already taken this difference in semantics into
109767961f9dSMike Kravetz 		 * account.  Therefore, the meaning of chg is the same
109867961f9dSMike Kravetz 		 * as in the shared case above.  Code could easily be
109967961f9dSMike Kravetz 		 * combined, but keeping it separate draws attention to
110067961f9dSMike Kravetz 		 * subtle differences.
110167961f9dSMike Kravetz 		 */
110267961f9dSMike Kravetz 		if (chg)
110367961f9dSMike Kravetz 			return false;
110467961f9dSMike Kravetz 		else
1105559ec2f8SNicholas Krause 			return true;
110667961f9dSMike Kravetz 	}
1107a63884e9SJoonsoo Kim 
1108559ec2f8SNicholas Krause 	return false;
1109a1e78772SMel Gorman }
1110a1e78772SMel Gorman 
1111a5516438SAndi Kleen static void enqueue_huge_page(struct hstate *h, struct page *page)
11121da177e4SLinus Torvalds {
11131da177e4SLinus Torvalds 	int nid = page_to_nid(page);
11149487ca60SMike Kravetz 
11159487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
1116b65a4edaSMike Kravetz 	VM_BUG_ON_PAGE(page_count(page), page);
1117b65a4edaSMike Kravetz 
11180edaecfaSAneesh Kumar K.V 	list_move(&page->lru, &h->hugepage_freelists[nid]);
1119a5516438SAndi Kleen 	h->free_huge_pages++;
1120a5516438SAndi Kleen 	h->free_huge_pages_node[nid]++;
11216c037149SMike Kravetz 	SetHPageFreed(page);
11221da177e4SLinus Torvalds }
11231da177e4SLinus Torvalds 
112494310cbcSAnshuman Khandual static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
1125bf50bab2SNaoya Horiguchi {
1126bf50bab2SNaoya Horiguchi 	struct page *page;
11271a08ae36SPavel Tatashin 	bool pin = !!(current->flags & PF_MEMALLOC_PIN);
1128bf50bab2SNaoya Horiguchi 
11299487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
1130bbe88753SJoonsoo Kim 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
11316077c943SAlex Sierra 		if (pin && !is_longterm_pinnable_page(page))
1132bbe88753SJoonsoo Kim 			continue;
1133bbe88753SJoonsoo Kim 
11346664bfc8SWei Yang 		if (PageHWPoison(page))
11356664bfc8SWei Yang 			continue;
1136bbe88753SJoonsoo Kim 
11370edaecfaSAneesh Kumar K.V 		list_move(&page->lru, &h->hugepage_activelist);
1138a9869b83SNaoya Horiguchi 		set_page_refcounted(page);
11396c037149SMike Kravetz 		ClearHPageFreed(page);
1140bf50bab2SNaoya Horiguchi 		h->free_huge_pages--;
1141bf50bab2SNaoya Horiguchi 		h->free_huge_pages_node[nid]--;
1142bf50bab2SNaoya Horiguchi 		return page;
1143bf50bab2SNaoya Horiguchi 	}
1144bf50bab2SNaoya Horiguchi 
11456664bfc8SWei Yang 	return NULL;
11466664bfc8SWei Yang }
11476664bfc8SWei Yang 
11483e59fcb0SMichal Hocko static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
11493e59fcb0SMichal Hocko 		nodemask_t *nmask)
115094310cbcSAnshuman Khandual {
11513e59fcb0SMichal Hocko 	unsigned int cpuset_mems_cookie;
11523e59fcb0SMichal Hocko 	struct zonelist *zonelist;
11533e59fcb0SMichal Hocko 	struct zone *zone;
11543e59fcb0SMichal Hocko 	struct zoneref *z;
115598fa15f3SAnshuman Khandual 	int node = NUMA_NO_NODE;
11563e59fcb0SMichal Hocko 
11573e59fcb0SMichal Hocko 	zonelist = node_zonelist(nid, gfp_mask);
11583e59fcb0SMichal Hocko 
11593e59fcb0SMichal Hocko retry_cpuset:
11603e59fcb0SMichal Hocko 	cpuset_mems_cookie = read_mems_allowed_begin();
11613e59fcb0SMichal Hocko 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
116294310cbcSAnshuman Khandual 		struct page *page;
116394310cbcSAnshuman Khandual 
11643e59fcb0SMichal Hocko 		if (!cpuset_zone_allowed(zone, gfp_mask))
11653e59fcb0SMichal Hocko 			continue;
11663e59fcb0SMichal Hocko 		/*
11673e59fcb0SMichal Hocko 		 * No need to ask again on the same node. The pool is node rather
11683e59fcb0SMichal Hocko 		 * than zone aware.
11693e59fcb0SMichal Hocko 		 */
11703e59fcb0SMichal Hocko 		if (zone_to_nid(zone) == node)
11713e59fcb0SMichal Hocko 			continue;
11723e59fcb0SMichal Hocko 		node = zone_to_nid(zone);
117394310cbcSAnshuman Khandual 
117494310cbcSAnshuman Khandual 		page = dequeue_huge_page_node_exact(h, node);
117594310cbcSAnshuman Khandual 		if (page)
117694310cbcSAnshuman Khandual 			return page;
117794310cbcSAnshuman Khandual 	}
11783e59fcb0SMichal Hocko 	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
11793e59fcb0SMichal Hocko 		goto retry_cpuset;
11803e59fcb0SMichal Hocko 
118194310cbcSAnshuman Khandual 	return NULL;
118294310cbcSAnshuman Khandual }
118394310cbcSAnshuman Khandual 
1184a5516438SAndi Kleen static struct page *dequeue_huge_page_vma(struct hstate *h,
1185a5516438SAndi Kleen 				struct vm_area_struct *vma,
1186af0ed73eSJoonsoo Kim 				unsigned long address, int avoid_reserve,
1187af0ed73eSJoonsoo Kim 				long chg)
11881da177e4SLinus Torvalds {
1189cfcaa66fSBen Widawsky 	struct page *page = NULL;
1190480eccf9SLee Schermerhorn 	struct mempolicy *mpol;
119104ec6264SVlastimil Babka 	gfp_t gfp_mask;
11923e59fcb0SMichal Hocko 	nodemask_t *nodemask;
119304ec6264SVlastimil Babka 	int nid;
11941da177e4SLinus Torvalds 
1195a1e78772SMel Gorman 	/*
1196a1e78772SMel Gorman 	 * A child process with MAP_PRIVATE mappings created by its parent
1197a1e78772SMel Gorman 	 * has no page reserves. This check ensures that reservations are
1198a1e78772SMel Gorman 	 * not "stolen". The child may still get SIGKILLed.
1199a1e78772SMel Gorman 	 */
1200af0ed73eSJoonsoo Kim 	if (!vma_has_reserves(vma, chg) &&
1201a5516438SAndi Kleen 			h->free_huge_pages - h->resv_huge_pages == 0)
1202c0ff7453SMiao Xie 		goto err;
1203a1e78772SMel Gorman 
120404f2cbe3SMel Gorman 	/* If reserves cannot be used, ensure enough pages are in the pool */
1205a5516438SAndi Kleen 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
12066eab04a8SJustin P. Mattock 		goto err;
120704f2cbe3SMel Gorman 
120804ec6264SVlastimil Babka 	gfp_mask = htlb_alloc_mask(h);
120904ec6264SVlastimil Babka 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1210cfcaa66fSBen Widawsky 
1211cfcaa66fSBen Widawsky 	if (mpol_is_preferred_many(mpol)) {
12123e59fcb0SMichal Hocko 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1213cfcaa66fSBen Widawsky 
1214cfcaa66fSBen Widawsky 		/* Fallback to all nodes if page==NULL */
1215cfcaa66fSBen Widawsky 		nodemask = NULL;
1216cfcaa66fSBen Widawsky 	}
1217cfcaa66fSBen Widawsky 
1218cfcaa66fSBen Widawsky 	if (!page)
1219cfcaa66fSBen Widawsky 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1220cfcaa66fSBen Widawsky 
12213e59fcb0SMichal Hocko 	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
1222d6995da3SMike Kravetz 		SetHPageRestoreReserve(page);
1223a63884e9SJoonsoo Kim 		h->resv_huge_pages--;
1224bf50bab2SNaoya Horiguchi 	}
1225cc9a6c87SMel Gorman 
1226cc9a6c87SMel Gorman 	mpol_cond_put(mpol);
1227cc9a6c87SMel Gorman 	return page;
1228cc9a6c87SMel Gorman 
1229c0ff7453SMiao Xie err:
1230cc9a6c87SMel Gorman 	return NULL;
12311da177e4SLinus Torvalds }
12321da177e4SLinus Torvalds 
12331cac6f2cSLuiz Capitulino /*
12341cac6f2cSLuiz Capitulino  * common helper functions for hstate_next_node_to_{alloc|free}.
12351cac6f2cSLuiz Capitulino  * We may have allocated or freed a huge page based on a different
12361cac6f2cSLuiz Capitulino  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
12371cac6f2cSLuiz Capitulino  * be outside of *nodes_allowed.  Ensure that we use an allowed
12381cac6f2cSLuiz Capitulino  * node for alloc or free.
12391cac6f2cSLuiz Capitulino  */
12401cac6f2cSLuiz Capitulino static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
12411cac6f2cSLuiz Capitulino {
12420edaf86cSAndrew Morton 	nid = next_node_in(nid, *nodes_allowed);
12431cac6f2cSLuiz Capitulino 	VM_BUG_ON(nid >= MAX_NUMNODES);
12441cac6f2cSLuiz Capitulino 
12451cac6f2cSLuiz Capitulino 	return nid;
12461cac6f2cSLuiz Capitulino }
12471cac6f2cSLuiz Capitulino 
12481cac6f2cSLuiz Capitulino static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
12491cac6f2cSLuiz Capitulino {
12501cac6f2cSLuiz Capitulino 	if (!node_isset(nid, *nodes_allowed))
12511cac6f2cSLuiz Capitulino 		nid = next_node_allowed(nid, nodes_allowed);
12521cac6f2cSLuiz Capitulino 	return nid;
12531cac6f2cSLuiz Capitulino }
12541cac6f2cSLuiz Capitulino 
12551cac6f2cSLuiz Capitulino /*
12561cac6f2cSLuiz Capitulino  * returns the previously saved node ["this node"] from which to
12571cac6f2cSLuiz Capitulino  * allocate a persistent huge page for the pool and advance the
12581cac6f2cSLuiz Capitulino  * next node from which to allocate, handling wrap at end of node
12591cac6f2cSLuiz Capitulino  * mask.
12601cac6f2cSLuiz Capitulino  */
12611cac6f2cSLuiz Capitulino static int hstate_next_node_to_alloc(struct hstate *h,
12621cac6f2cSLuiz Capitulino 					nodemask_t *nodes_allowed)
12631cac6f2cSLuiz Capitulino {
12641cac6f2cSLuiz Capitulino 	int nid;
12651cac6f2cSLuiz Capitulino 
12661cac6f2cSLuiz Capitulino 	VM_BUG_ON(!nodes_allowed);
12671cac6f2cSLuiz Capitulino 
12681cac6f2cSLuiz Capitulino 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
12691cac6f2cSLuiz Capitulino 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
12701cac6f2cSLuiz Capitulino 
12711cac6f2cSLuiz Capitulino 	return nid;
12721cac6f2cSLuiz Capitulino }
12731cac6f2cSLuiz Capitulino 
12741cac6f2cSLuiz Capitulino /*
127510c6ec49SMike Kravetz  * helper for remove_pool_huge_page() - return the previously saved
12761cac6f2cSLuiz Capitulino  * node ["this node"] from which to free a huge page.  Advance the
12771cac6f2cSLuiz Capitulino  * next node id whether or not we find a free huge page to free so
12781cac6f2cSLuiz Capitulino  * that the next attempt to free addresses the next node.
12791cac6f2cSLuiz Capitulino  */
12801cac6f2cSLuiz Capitulino static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
12811cac6f2cSLuiz Capitulino {
12821cac6f2cSLuiz Capitulino 	int nid;
12831cac6f2cSLuiz Capitulino 
12841cac6f2cSLuiz Capitulino 	VM_BUG_ON(!nodes_allowed);
12851cac6f2cSLuiz Capitulino 
12861cac6f2cSLuiz Capitulino 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
12871cac6f2cSLuiz Capitulino 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
12881cac6f2cSLuiz Capitulino 
12891cac6f2cSLuiz Capitulino 	return nid;
12901cac6f2cSLuiz Capitulino }
12911cac6f2cSLuiz Capitulino 
12921cac6f2cSLuiz Capitulino #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
12931cac6f2cSLuiz Capitulino 	for (nr_nodes = nodes_weight(*mask);				\
12941cac6f2cSLuiz Capitulino 		nr_nodes > 0 &&						\
12951cac6f2cSLuiz Capitulino 		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
12961cac6f2cSLuiz Capitulino 		nr_nodes--)
12971cac6f2cSLuiz Capitulino 
12981cac6f2cSLuiz Capitulino #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
12991cac6f2cSLuiz Capitulino 	for (nr_nodes = nodes_weight(*mask);				\
13001cac6f2cSLuiz Capitulino 		nr_nodes > 0 &&						\
13011cac6f2cSLuiz Capitulino 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
13021cac6f2cSLuiz Capitulino 		nr_nodes--)
13031cac6f2cSLuiz Capitulino 
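/*
 * Illustrative sketch of the round-robin helpers above (hypothetical
 * caller, mirroring how alloc_pool_huge_page() later in this file uses
 * the macro):
 *
 *	struct page *page = NULL;
 *	int nr_nodes, node;
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		page = alloc_fresh_huge_page(h, htlb_alloc_mask(h) | __GFP_THISNODE,
 *					     node, nodes_allowed, NULL);
 *		if (page)
 *			break;		// at most one attempt per allowed node
 *	}
 *
 * Each iteration advances h->next_nid_to_alloc, so successive calls
 * spread allocations across the allowed nodes.
 */
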
13048531fc6fSMike Kravetz /* also used to demote non-gigantic huge pages */
130534d9e35bSMike Kravetz static void __destroy_compound_gigantic_page(struct page *page,
130634d9e35bSMike Kravetz 					unsigned int order, bool demote)
1307944d9fecSLuiz Capitulino {
1308944d9fecSLuiz Capitulino 	int i;
1309944d9fecSLuiz Capitulino 	int nr_pages = 1 << order;
131014455eabSCheng Li 	struct page *p;
1311944d9fecSLuiz Capitulino 
1312c8cc708aSGerald Schaefer 	atomic_set(compound_mapcount_ptr(page), 0);
131347e29d32SJohn Hubbard 	atomic_set(compound_pincount_ptr(page), 0);
131447e29d32SJohn Hubbard 
131514455eabSCheng Li 	for (i = 1; i < nr_pages; i++) {
131614455eabSCheng Li 		p = nth_page(page, i);
1317a01f4390SMike Kravetz 		p->mapping = NULL;
13181d798ca3SKirill A. Shutemov 		clear_compound_head(p);
131934d9e35bSMike Kravetz 		if (!demote)
1320944d9fecSLuiz Capitulino 			set_page_refcounted(p);
1321944d9fecSLuiz Capitulino 	}
1322944d9fecSLuiz Capitulino 
1323944d9fecSLuiz Capitulino 	set_compound_order(page, 0);
13245232c63fSMatthew Wilcox (Oracle) #ifdef CONFIG_64BIT
1325ba9c1201SGerald Schaefer 	page[1].compound_nr = 0;
13265232c63fSMatthew Wilcox (Oracle) #endif
1327944d9fecSLuiz Capitulino 	__ClearPageHead(page);
1328944d9fecSLuiz Capitulino }
1329944d9fecSLuiz Capitulino 
13308531fc6fSMike Kravetz static void destroy_compound_hugetlb_page_for_demote(struct page *page,
13318531fc6fSMike Kravetz 					unsigned int order)
13328531fc6fSMike Kravetz {
13338531fc6fSMike Kravetz 	__destroy_compound_gigantic_page(page, order, true);
13348531fc6fSMike Kravetz }
13358531fc6fSMike Kravetz 
13368531fc6fSMike Kravetz #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
133734d9e35bSMike Kravetz static void destroy_compound_gigantic_page(struct page *page,
133834d9e35bSMike Kravetz 					unsigned int order)
133934d9e35bSMike Kravetz {
134034d9e35bSMike Kravetz 	__destroy_compound_gigantic_page(page, order, false);
134134d9e35bSMike Kravetz }
134234d9e35bSMike Kravetz 
1343d00181b9SKirill A. Shutemov static void free_gigantic_page(struct page *page, unsigned int order)
1344944d9fecSLuiz Capitulino {
1345cf11e85fSRoman Gushchin 	/*
1346cf11e85fSRoman Gushchin 	 * If the page isn't allocated using the cma allocator,
1347cf11e85fSRoman Gushchin 	 * cma_release() returns false.
1348cf11e85fSRoman Gushchin 	 */
1349dbda8feaSBarry Song #ifdef CONFIG_CMA
1350dbda8feaSBarry Song 	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
1351cf11e85fSRoman Gushchin 		return;
1352dbda8feaSBarry Song #endif
1353cf11e85fSRoman Gushchin 
1354944d9fecSLuiz Capitulino 	free_contig_range(page_to_pfn(page), 1 << order);
1355944d9fecSLuiz Capitulino }
1356944d9fecSLuiz Capitulino 
13574eb0716eSAlexandre Ghiti #ifdef CONFIG_CONTIG_ALLOC
1358d9cc948fSMichal Hocko static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1359d9cc948fSMichal Hocko 		int nid, nodemask_t *nodemask)
1360944d9fecSLuiz Capitulino {
136104adbc3fSMiaohe Lin 	unsigned long nr_pages = pages_per_huge_page(h);
1362953f064aSLi Xinhai 	if (nid == NUMA_NO_NODE)
1363953f064aSLi Xinhai 		nid = numa_mem_id();
1364944d9fecSLuiz Capitulino 
1365dbda8feaSBarry Song #ifdef CONFIG_CMA
1366dbda8feaSBarry Song 	{
1367cf11e85fSRoman Gushchin 		struct page *page;
1368cf11e85fSRoman Gushchin 		int node;
1369cf11e85fSRoman Gushchin 
1370953f064aSLi Xinhai 		if (hugetlb_cma[nid]) {
1371953f064aSLi Xinhai 			page = cma_alloc(hugetlb_cma[nid], nr_pages,
1372953f064aSLi Xinhai 					huge_page_order(h), true);
1373953f064aSLi Xinhai 			if (page)
1374953f064aSLi Xinhai 				return page;
1375953f064aSLi Xinhai 		}
1376953f064aSLi Xinhai 
1377953f064aSLi Xinhai 		if (!(gfp_mask & __GFP_THISNODE)) {
1378cf11e85fSRoman Gushchin 			for_each_node_mask(node, *nodemask) {
1379953f064aSLi Xinhai 				if (node == nid || !hugetlb_cma[node])
1380cf11e85fSRoman Gushchin 					continue;
1381cf11e85fSRoman Gushchin 
1382cf11e85fSRoman Gushchin 				page = cma_alloc(hugetlb_cma[node], nr_pages,
1383cf11e85fSRoman Gushchin 						huge_page_order(h), true);
1384cf11e85fSRoman Gushchin 				if (page)
1385cf11e85fSRoman Gushchin 					return page;
1386cf11e85fSRoman Gushchin 			}
1387cf11e85fSRoman Gushchin 		}
1388953f064aSLi Xinhai 	}
1389dbda8feaSBarry Song #endif
1390cf11e85fSRoman Gushchin 
13915e27a2dfSAnshuman Khandual 	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
1392944d9fecSLuiz Capitulino }
1393944d9fecSLuiz Capitulino 
13944eb0716eSAlexandre Ghiti #else /* !CONFIG_CONTIG_ALLOC */
13954eb0716eSAlexandre Ghiti static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
13964eb0716eSAlexandre Ghiti 					int nid, nodemask_t *nodemask)
13974eb0716eSAlexandre Ghiti {
13984eb0716eSAlexandre Ghiti 	return NULL;
13994eb0716eSAlexandre Ghiti }
14004eb0716eSAlexandre Ghiti #endif /* CONFIG_CONTIG_ALLOC */
1401944d9fecSLuiz Capitulino 
1402e1073d1eSAneesh Kumar K.V #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1403d9cc948fSMichal Hocko static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
14044eb0716eSAlexandre Ghiti 					int nid, nodemask_t *nodemask)
14054eb0716eSAlexandre Ghiti {
14064eb0716eSAlexandre Ghiti 	return NULL;
14074eb0716eSAlexandre Ghiti }
1408d00181b9SKirill A. Shutemov static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1409944d9fecSLuiz Capitulino static inline void destroy_compound_gigantic_page(struct page *page,
1410d00181b9SKirill A. Shutemov 						unsigned int order) { }
1411944d9fecSLuiz Capitulino #endif
1412944d9fecSLuiz Capitulino 
14136eb4e88aSMike Kravetz /*
14146eb4e88aSMike Kravetz  * Remove hugetlb page from lists, and update dtor so that page appears
141534d9e35bSMike Kravetz  * as just a compound page.
141634d9e35bSMike Kravetz  *
141734d9e35bSMike Kravetz  * A reference is held on the page, except in the case of demote.
14186eb4e88aSMike Kravetz  *
14196eb4e88aSMike Kravetz  * Must be called with hugetlb lock held.
14206eb4e88aSMike Kravetz  */
142134d9e35bSMike Kravetz static void __remove_hugetlb_page(struct hstate *h, struct page *page,
142234d9e35bSMike Kravetz 							bool adjust_surplus,
142334d9e35bSMike Kravetz 							bool demote)
14246eb4e88aSMike Kravetz {
14256eb4e88aSMike Kravetz 	int nid = page_to_nid(page);
14266eb4e88aSMike Kravetz 
14276eb4e88aSMike Kravetz 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
14286eb4e88aSMike Kravetz 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
14296eb4e88aSMike Kravetz 
14309487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
14316eb4e88aSMike Kravetz 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
14326eb4e88aSMike Kravetz 		return;
14336eb4e88aSMike Kravetz 
14346eb4e88aSMike Kravetz 	list_del(&page->lru);
14356eb4e88aSMike Kravetz 
14366eb4e88aSMike Kravetz 	if (HPageFreed(page)) {
14376eb4e88aSMike Kravetz 		h->free_huge_pages--;
14386eb4e88aSMike Kravetz 		h->free_huge_pages_node[nid]--;
14396eb4e88aSMike Kravetz 	}
14406eb4e88aSMike Kravetz 	if (adjust_surplus) {
14416eb4e88aSMike Kravetz 		h->surplus_huge_pages--;
14426eb4e88aSMike Kravetz 		h->surplus_huge_pages_node[nid]--;
14436eb4e88aSMike Kravetz 	}
14446eb4e88aSMike Kravetz 
1445e32d20c0SMike Kravetz 	/*
1446e32d20c0SMike Kravetz 	 * Very subtle
1447e32d20c0SMike Kravetz 	 *
1448e32d20c0SMike Kravetz 	 * For non-gigantic pages set the destructor to the normal compound
1449e32d20c0SMike Kravetz 	 * page dtor.  This is needed in case someone takes an additional
1450e32d20c0SMike Kravetz 	 * temporary ref to the page, and freeing is delayed until they drop
1451e32d20c0SMike Kravetz 	 * their reference.
1452e32d20c0SMike Kravetz 	 *
1453e32d20c0SMike Kravetz 	 * For gigantic pages set the destructor to the null dtor.  This
1454e32d20c0SMike Kravetz 	 * destructor will never be called.  Before freeing the gigantic
1455e32d20c0SMike Kravetz 	 * page destroy_compound_gigantic_page will turn the compound page
1456e32d20c0SMike Kravetz 	 * into a simple group of pages.  After this the destructor does not
1457e32d20c0SMike Kravetz 	 * apply.
1458e32d20c0SMike Kravetz 	 *
1459e32d20c0SMike Kravetz 	 * This handles the case where more than one ref is held when and
1460e32d20c0SMike Kravetz 	 * after update_and_free_page is called.
146134d9e35bSMike Kravetz 	 *
146234d9e35bSMike Kravetz 	 * In the case of demote we do not ref count the page as it will soon
146334d9e35bSMike Kravetz 	 * be turned into a page of smaller size.
1464e32d20c0SMike Kravetz 	 */
146534d9e35bSMike Kravetz 	if (!demote)
14666eb4e88aSMike Kravetz 		set_page_refcounted(page);
1467e32d20c0SMike Kravetz 	if (hstate_is_gigantic(h))
14686eb4e88aSMike Kravetz 		set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1469e32d20c0SMike Kravetz 	else
1470e32d20c0SMike Kravetz 		set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
14716eb4e88aSMike Kravetz 
14726eb4e88aSMike Kravetz 	h->nr_huge_pages--;
14736eb4e88aSMike Kravetz 	h->nr_huge_pages_node[nid]--;
14746eb4e88aSMike Kravetz }
14756eb4e88aSMike Kravetz 
147634d9e35bSMike Kravetz static void remove_hugetlb_page(struct hstate *h, struct page *page,
147734d9e35bSMike Kravetz 							bool adjust_surplus)
147834d9e35bSMike Kravetz {
147934d9e35bSMike Kravetz 	__remove_hugetlb_page(h, page, adjust_surplus, false);
148034d9e35bSMike Kravetz }
148134d9e35bSMike Kravetz 
14828531fc6fSMike Kravetz static void remove_hugetlb_page_for_demote(struct hstate *h, struct page *page,
14838531fc6fSMike Kravetz 							bool adjust_surplus)
14848531fc6fSMike Kravetz {
14858531fc6fSMike Kravetz 	__remove_hugetlb_page(h, page, adjust_surplus, true);
14868531fc6fSMike Kravetz }
14878531fc6fSMike Kravetz 
1488ad2fa371SMuchun Song static void add_hugetlb_page(struct hstate *h, struct page *page,
1489ad2fa371SMuchun Song 			     bool adjust_surplus)
1490ad2fa371SMuchun Song {
1491ad2fa371SMuchun Song 	int zeroed;
1492ad2fa371SMuchun Song 	int nid = page_to_nid(page);
1493ad2fa371SMuchun Song 
1494ad2fa371SMuchun Song 	VM_BUG_ON_PAGE(!HPageVmemmapOptimized(page), page);
1495ad2fa371SMuchun Song 
1496ad2fa371SMuchun Song 	lockdep_assert_held(&hugetlb_lock);
1497ad2fa371SMuchun Song 
1498ad2fa371SMuchun Song 	INIT_LIST_HEAD(&page->lru);
1499ad2fa371SMuchun Song 	h->nr_huge_pages++;
1500ad2fa371SMuchun Song 	h->nr_huge_pages_node[nid]++;
1501ad2fa371SMuchun Song 
1502ad2fa371SMuchun Song 	if (adjust_surplus) {
1503ad2fa371SMuchun Song 		h->surplus_huge_pages++;
1504ad2fa371SMuchun Song 		h->surplus_huge_pages_node[nid]++;
1505ad2fa371SMuchun Song 	}
1506ad2fa371SMuchun Song 
1507ad2fa371SMuchun Song 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1508ad2fa371SMuchun Song 	set_page_private(page, 0);
1509a9e1eab2SMiaohe Lin 	/*
1510a9e1eab2SMiaohe Lin 	 * We have to set HPageVmemmapOptimized again because the above
1511a9e1eab2SMiaohe Lin 	 * set_page_private(page, 0) cleared it.
1512a9e1eab2SMiaohe Lin 	 */
1513ad2fa371SMuchun Song 	SetHPageVmemmapOptimized(page);
1514ad2fa371SMuchun Song 
1515ad2fa371SMuchun Song 	/*
1516b65a4edaSMike Kravetz 	 * This page is about to be managed by the hugetlb allocator and
1517b65a4edaSMike Kravetz 	 * should have no users.  Drop our reference, and check for others
1518b65a4edaSMike Kravetz 	 * just in case.
1519ad2fa371SMuchun Song 	 */
1520ad2fa371SMuchun Song 	zeroed = put_page_testzero(page);
1521b65a4edaSMike Kravetz 	if (!zeroed)
1522b65a4edaSMike Kravetz 		/*
1523b65a4edaSMike Kravetz 		 * It is VERY unlikely someone else has taken a ref on
1524b65a4edaSMike Kravetz 		 * the page.  In this case, we simply return as the
1525b65a4edaSMike Kravetz 		 * hugetlb destructor (free_huge_page) will be called
1526b65a4edaSMike Kravetz 		 * when this other ref is dropped.
1527b65a4edaSMike Kravetz 		 */
1528b65a4edaSMike Kravetz 		return;
1529b65a4edaSMike Kravetz 
1530ad2fa371SMuchun Song 	arch_clear_hugepage_flags(page);
1531ad2fa371SMuchun Song 	enqueue_huge_page(h, page);
1532ad2fa371SMuchun Song }
1533ad2fa371SMuchun Song 
1534b65d4adbSMuchun Song static void __update_and_free_page(struct hstate *h, struct page *page)
15356af2acb6SAdam Litke {
15366af2acb6SAdam Litke 	int i;
153714455eabSCheng Li 	struct page *subpage;
1538a5516438SAndi Kleen 
15394eb0716eSAlexandre Ghiti 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1540944d9fecSLuiz Capitulino 		return;
154118229df5SAndy Whitcroft 
1542161df60eSNaoya Horiguchi 	/*
1543161df60eSNaoya Horiguchi 	 * If we don't know which subpages are hwpoisoned, we can't free
1544161df60eSNaoya Horiguchi 	 * the hugepage, so it's leaked intentionally.
1545161df60eSNaoya Horiguchi 	 */
1546161df60eSNaoya Horiguchi 	if (HPageRawHwpUnreliable(page))
1547161df60eSNaoya Horiguchi 		return;
1548161df60eSNaoya Horiguchi 
15496213834cSMuchun Song 	if (hugetlb_vmemmap_restore(h, page)) {
1550ad2fa371SMuchun Song 		spin_lock_irq(&hugetlb_lock);
1551ad2fa371SMuchun Song 		/*
1552ad2fa371SMuchun Song 		 * If we cannot allocate vmemmap pages, just refuse to free the
1553ad2fa371SMuchun Song 		 * page; put the page back on the hugetlb free list and treat
1554ad2fa371SMuchun Song 		 * it as a surplus page.
1555ad2fa371SMuchun Song 		 */
1556ad2fa371SMuchun Song 		add_hugetlb_page(h, page, true);
1557ad2fa371SMuchun Song 		spin_unlock_irq(&hugetlb_lock);
1558ad2fa371SMuchun Song 		return;
1559ad2fa371SMuchun Song 	}
1560ad2fa371SMuchun Song 
1561161df60eSNaoya Horiguchi 	/*
1562161df60eSNaoya Horiguchi 	 * Move PageHWPoison flag from head page to the raw error pages,
1563161df60eSNaoya Horiguchi 	 * which makes any healthy subpages reusable.
1564161df60eSNaoya Horiguchi 	 */
1565161df60eSNaoya Horiguchi 	if (unlikely(PageHWPoison(page)))
1566161df60eSNaoya Horiguchi 		hugetlb_clear_page_hwpoison(page);
1567161df60eSNaoya Horiguchi 
156814455eabSCheng Li 	for (i = 0; i < pages_per_huge_page(h); i++) {
156914455eabSCheng Li 		subpage = nth_page(page, i);
1570dbfee5aeSMike Kravetz 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
157132f84528SChris Forbes 				1 << PG_referenced | 1 << PG_dirty |
1572a7407a27SLuiz Capitulino 				1 << PG_active | 1 << PG_private |
1573a7407a27SLuiz Capitulino 				1 << PG_writeback);
15746af2acb6SAdam Litke 	}
1575a01f4390SMike Kravetz 
1576a01f4390SMike Kravetz 	/*
1577a01f4390SMike Kravetz 	 * Non-gigantic pages demoted from CMA allocated gigantic pages
1578a01f4390SMike Kravetz 	 * need to be given back to CMA in free_gigantic_page.
1579a01f4390SMike Kravetz 	 */
1580a01f4390SMike Kravetz 	if (hstate_is_gigantic(h) ||
1581a01f4390SMike Kravetz 	    hugetlb_cma_page(page, huge_page_order(h))) {
1582944d9fecSLuiz Capitulino 		destroy_compound_gigantic_page(page, huge_page_order(h));
1583944d9fecSLuiz Capitulino 		free_gigantic_page(page, huge_page_order(h));
1584944d9fecSLuiz Capitulino 	} else {
1585a5516438SAndi Kleen 		__free_pages(page, huge_page_order(h));
15866af2acb6SAdam Litke 	}
1587944d9fecSLuiz Capitulino }
15886af2acb6SAdam Litke 
1589b65d4adbSMuchun Song /*
1590b65d4adbSMuchun Song  * Because update_and_free_page() can be called under any context, we cannot
1591b65d4adbSMuchun Song  * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
1592b65d4adbSMuchun Song  * actual freeing to a workqueue to avoid using GFP_ATOMIC to allocate
1593b65d4adbSMuchun Song  * the vmemmap pages.
1594b65d4adbSMuchun Song  *
1595b65d4adbSMuchun Song  * free_hpage_workfn() locklessly retrieves the linked list of pages to be
1596b65d4adbSMuchun Song  * freed and frees them one-by-one. As the page->mapping pointer is going
1597b65d4adbSMuchun Song  * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1598b65d4adbSMuchun Song  * structure of a lockless linked list of huge pages to be freed.
1599b65d4adbSMuchun Song  */
1600b65d4adbSMuchun Song static LLIST_HEAD(hpage_freelist);
1601b65d4adbSMuchun Song 
1602b65d4adbSMuchun Song static void free_hpage_workfn(struct work_struct *work)
1603b65d4adbSMuchun Song {
1604b65d4adbSMuchun Song 	struct llist_node *node;
1605b65d4adbSMuchun Song 
1606b65d4adbSMuchun Song 	node = llist_del_all(&hpage_freelist);
1607b65d4adbSMuchun Song 
1608b65d4adbSMuchun Song 	while (node) {
1609b65d4adbSMuchun Song 		struct page *page;
1610b65d4adbSMuchun Song 		struct hstate *h;
1611b65d4adbSMuchun Song 
1612b65d4adbSMuchun Song 		page = container_of((struct address_space **)node,
1613b65d4adbSMuchun Song 				     struct page, mapping);
1614b65d4adbSMuchun Song 		node = node->next;
1615b65d4adbSMuchun Song 		page->mapping = NULL;
1616b65d4adbSMuchun Song 		/*
1617b65d4adbSMuchun Song 		 * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
1618b65d4adbSMuchun Song 		 * is going to trigger because a previous call to
1619b65d4adbSMuchun Song 		 * remove_hugetlb_page() will set_compound_page_dtor(page,
1620b65d4adbSMuchun Song 		 * NULL_COMPOUND_DTOR), so do not use page_hstate() directly.
1621b65d4adbSMuchun Song 		 */
1622b65d4adbSMuchun Song 		h = size_to_hstate(page_size(page));
1623b65d4adbSMuchun Song 
1624b65d4adbSMuchun Song 		__update_and_free_page(h, page);
1625b65d4adbSMuchun Song 
1626b65d4adbSMuchun Song 		cond_resched();
1627b65d4adbSMuchun Song 	}
1628b65d4adbSMuchun Song }
1629b65d4adbSMuchun Song static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1630b65d4adbSMuchun Song 
1631b65d4adbSMuchun Song static inline void flush_free_hpage_work(struct hstate *h)
1632b65d4adbSMuchun Song {
16336213834cSMuchun Song 	if (hugetlb_vmemmap_optimizable(h))
1634b65d4adbSMuchun Song 		flush_work(&free_hpage_work);
1635b65d4adbSMuchun Song }
1636b65d4adbSMuchun Song 
1637b65d4adbSMuchun Song static void update_and_free_page(struct hstate *h, struct page *page,
1638b65d4adbSMuchun Song 				 bool atomic)
1639b65d4adbSMuchun Song {
1640ad2fa371SMuchun Song 	if (!HPageVmemmapOptimized(page) || !atomic) {
1641b65d4adbSMuchun Song 		__update_and_free_page(h, page);
1642b65d4adbSMuchun Song 		return;
1643b65d4adbSMuchun Song 	}
1644b65d4adbSMuchun Song 
1645b65d4adbSMuchun Song 	/*
1646b65d4adbSMuchun Song 	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1647b65d4adbSMuchun Song 	 *
1648b65d4adbSMuchun Song 	 * Only call schedule_work() if hpage_freelist was previously
1649b65d4adbSMuchun Song 	 * empty. Otherwise, schedule_work() has already been called but the
1650b65d4adbSMuchun Song 	 * workfn hasn't retrieved the list yet.
1651b65d4adbSMuchun Song 	 */
1652b65d4adbSMuchun Song 	if (llist_add((struct llist_node *)&page->mapping, &hpage_freelist))
1653b65d4adbSMuchun Song 		schedule_work(&free_hpage_work);
1654b65d4adbSMuchun Song }
1655b65d4adbSMuchun Song 
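/*
 * Illustrative sketch of the deferred-free hand-off implemented above
 * (informal call flow, not literal code):
 *
 *	update_and_free_page(h, page, true)
 *	    llist_add((struct llist_node *)&page->mapping, &hpage_freelist)
 *	    schedule_work(&free_hpage_work)	// only if the list was empty
 *	...later, in process context...
 *	free_hpage_workfn()
 *	    node = llist_del_all(&hpage_freelist)	// grab the whole list
 *	    for each entry: __update_and_free_page(h, page)
 *
 * The indentation above is informal; see the functions above for the
 * exact code.
 */
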
165610c6ec49SMike Kravetz static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
165710c6ec49SMike Kravetz {
165810c6ec49SMike Kravetz 	struct page *page, *t_page;
165910c6ec49SMike Kravetz 
166010c6ec49SMike Kravetz 	list_for_each_entry_safe(page, t_page, list, lru) {
1661b65d4adbSMuchun Song 		update_and_free_page(h, page, false);
166210c6ec49SMike Kravetz 		cond_resched();
166310c6ec49SMike Kravetz 	}
166410c6ec49SMike Kravetz }
166510c6ec49SMike Kravetz 
1666e5ff2159SAndi Kleen struct hstate *size_to_hstate(unsigned long size)
1667e5ff2159SAndi Kleen {
1668e5ff2159SAndi Kleen 	struct hstate *h;
1669e5ff2159SAndi Kleen 
1670e5ff2159SAndi Kleen 	for_each_hstate(h) {
1671e5ff2159SAndi Kleen 		if (huge_page_size(h) == size)
1672e5ff2159SAndi Kleen 			return h;
1673e5ff2159SAndi Kleen 	}
1674e5ff2159SAndi Kleen 	return NULL;
1675e5ff2159SAndi Kleen }
1676e5ff2159SAndi Kleen 
1677db71ef79SMike Kravetz void free_huge_page(struct page *page)
167827a85ef1SDavid Gibson {
1679a5516438SAndi Kleen 	/*
1680a5516438SAndi Kleen 	 * Can't pass hstate in here because it is called from the
1681a5516438SAndi Kleen 	 * compound page destructor.
1682a5516438SAndi Kleen 	 */
1683e5ff2159SAndi Kleen 	struct hstate *h = page_hstate(page);
16847893d1d5SAdam Litke 	int nid = page_to_nid(page);
1685d6995da3SMike Kravetz 	struct hugepage_subpool *spool = hugetlb_page_subpool(page);
168607443a85SJoonsoo Kim 	bool restore_reserve;
1687db71ef79SMike Kravetz 	unsigned long flags;
168827a85ef1SDavid Gibson 
1689b4330afbSMike Kravetz 	VM_BUG_ON_PAGE(page_count(page), page);
1690b4330afbSMike Kravetz 	VM_BUG_ON_PAGE(page_mapcount(page), page);
16918ace22bcSYongkai Wu 
1692d6995da3SMike Kravetz 	hugetlb_set_page_subpool(page, NULL);
169378fbe906SDavid Hildenbrand 	if (PageAnon(page))
169478fbe906SDavid Hildenbrand 		__ClearPageAnonExclusive(page);
16958ace22bcSYongkai Wu 	page->mapping = NULL;
1696d6995da3SMike Kravetz 	restore_reserve = HPageRestoreReserve(page);
1697d6995da3SMike Kravetz 	ClearHPageRestoreReserve(page);
169827a85ef1SDavid Gibson 
16991c5ecae3SMike Kravetz 	/*
1700d6995da3SMike Kravetz 	 * If HPageRestoreReserve was set on page, page allocation consumed a
17010919e1b6SMike Kravetz 	 * reservation.  If the page was associated with a subpool, there
17020919e1b6SMike Kravetz 	 * would have been a page reserved in the subpool before allocation
17030919e1b6SMike Kravetz 	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
17046c26d310SMiaohe Lin 	 * reservation, do not call hugepage_subpool_put_pages() as this will
17050919e1b6SMike Kravetz 	 * remove the reserved page from the subpool.
17060919e1b6SMike Kravetz 	 */
17070919e1b6SMike Kravetz 	if (!restore_reserve) {
17080919e1b6SMike Kravetz 		/*
17090919e1b6SMike Kravetz 		 * A return code of zero implies that the subpool will be
17100919e1b6SMike Kravetz 		 * under its minimum size if the reservation is not restored
17110919e1b6SMike Kravetz 		 * after the page is freed.  Therefore, force the restore_reserve
17120919e1b6SMike Kravetz 		 * operation.
17131c5ecae3SMike Kravetz 		 */
17141c5ecae3SMike Kravetz 		if (hugepage_subpool_put_pages(spool, 1) == 0)
17151c5ecae3SMike Kravetz 			restore_reserve = true;
17160919e1b6SMike Kravetz 	}
17171c5ecae3SMike Kravetz 
1718db71ef79SMike Kravetz 	spin_lock_irqsave(&hugetlb_lock, flags);
17198f251a3dSMike Kravetz 	ClearHPageMigratable(page);
17206d76dcf4SAneesh Kumar K.V 	hugetlb_cgroup_uncharge_page(hstate_index(h),
17216d76dcf4SAneesh Kumar K.V 				     pages_per_huge_page(h), page);
172208cf9fafSMina Almasry 	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
172308cf9fafSMina Almasry 					  pages_per_huge_page(h), page);
172407443a85SJoonsoo Kim 	if (restore_reserve)
172507443a85SJoonsoo Kim 		h->resv_huge_pages++;
172607443a85SJoonsoo Kim 
17279157c311SMike Kravetz 	if (HPageTemporary(page)) {
17286eb4e88aSMike Kravetz 		remove_hugetlb_page(h, page, false);
1729db71ef79SMike Kravetz 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1730b65d4adbSMuchun Song 		update_and_free_page(h, page, true);
1731ab5ac90aSMichal Hocko 	} else if (h->surplus_huge_pages_node[nid]) {
17320edaecfaSAneesh Kumar K.V 		/* remove the page from active list */
17336eb4e88aSMike Kravetz 		remove_hugetlb_page(h, page, true);
1734db71ef79SMike Kravetz 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1735b65d4adbSMuchun Song 		update_and_free_page(h, page, true);
17367893d1d5SAdam Litke 	} else {
17375d3a551cSWill Deacon 		arch_clear_hugepage_flags(page);
1738a5516438SAndi Kleen 		enqueue_huge_page(h, page);
1739db71ef79SMike Kravetz 		spin_unlock_irqrestore(&hugetlb_lock, flags);
174027a85ef1SDavid Gibson 	}
17411121828aSMike Kravetz }
174227a85ef1SDavid Gibson 
1743d3d99fccSOscar Salvador /*
1744d3d99fccSOscar Salvador  * Must be called with the hugetlb lock held
1745d3d99fccSOscar Salvador  */
1746d3d99fccSOscar Salvador static void __prep_account_new_huge_page(struct hstate *h, int nid)
1747d3d99fccSOscar Salvador {
1748d3d99fccSOscar Salvador 	lockdep_assert_held(&hugetlb_lock);
1749d3d99fccSOscar Salvador 	h->nr_huge_pages++;
1750d3d99fccSOscar Salvador 	h->nr_huge_pages_node[nid]++;
1751d3d99fccSOscar Salvador }
1752d3d99fccSOscar Salvador 
1753f41f2ed4SMuchun Song static void __prep_new_huge_page(struct hstate *h, struct page *page)
1754b7ba30c6SAndi Kleen {
17556213834cSMuchun Song 	hugetlb_vmemmap_optimize(h, page);
17560edaecfaSAneesh Kumar K.V 	INIT_LIST_HEAD(&page->lru);
1757f1e61557SKirill A. Shutemov 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1758ff546117SMike Kravetz 	hugetlb_set_page_subpool(page, NULL);
17599dd540e2SAneesh Kumar K.V 	set_hugetlb_cgroup(page, NULL);
17601adc4d41SMina Almasry 	set_hugetlb_cgroup_rsvd(page, NULL);
1761d3d99fccSOscar Salvador }
1762d3d99fccSOscar Salvador 
1763d3d99fccSOscar Salvador static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1764d3d99fccSOscar Salvador {
1765f41f2ed4SMuchun Song 	__prep_new_huge_page(h, page);
1766db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
1767d3d99fccSOscar Salvador 	__prep_account_new_huge_page(h, nid);
1768db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
1769b7ba30c6SAndi Kleen }
1770b7ba30c6SAndi Kleen 
177134d9e35bSMike Kravetz static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
177234d9e35bSMike Kravetz 								bool demote)
177320a0307cSWu Fengguang {
17747118fc29SMike Kravetz 	int i, j;
177520a0307cSWu Fengguang 	int nr_pages = 1 << order;
177614455eabSCheng Li 	struct page *p;
177720a0307cSWu Fengguang 
177820a0307cSWu Fengguang 	/* we rely on prep_new_huge_page to set the destructor */
177920a0307cSWu Fengguang 	set_compound_order(page, order);
1780ef5a22beSAndrea Arcangeli 	__ClearPageReserved(page);
1781de09d31dSKirill A. Shutemov 	__SetPageHead(page);
178214455eabSCheng Li 	for (i = 1; i < nr_pages; i++) {
178314455eabSCheng Li 		p = nth_page(page, i);
178414455eabSCheng Li 
1785ef5a22beSAndrea Arcangeli 		/*
1786ef5a22beSAndrea Arcangeli 		 * For gigantic hugepages allocated through bootmem at
1787ef5a22beSAndrea Arcangeli 		 * boot, it's safer to be consistent with the not-gigantic
1788ef5a22beSAndrea Arcangeli 		 * hugepages and clear the PG_reserved bit from all tail pages
17897c8de358SEthon Paul 		 * too.  Otherwise drivers using get_user_pages() to access tail
1790ef5a22beSAndrea Arcangeli 		 * pages may get the reference counting wrong if they see
1791ef5a22beSAndrea Arcangeli 		 * PG_reserved set on a tail page (despite the head page not
1792ef5a22beSAndrea Arcangeli 		 * having PG_reserved set).  Enforcing this consistency between
1793ef5a22beSAndrea Arcangeli 		 * head and tail pages allows drivers to optimize away a check
1794ef5a22beSAndrea Arcangeli 		 * on the head page when they need to know if put_page() is needed
1795ef5a22beSAndrea Arcangeli 		 * after get_user_pages().
1796ef5a22beSAndrea Arcangeli 		 */
1797ef5a22beSAndrea Arcangeli 		__ClearPageReserved(p);
17987118fc29SMike Kravetz 		/*
17997118fc29SMike Kravetz 		 * Subtle and very unlikely
18007118fc29SMike Kravetz 		 *
18017118fc29SMike Kravetz 		 * Gigantic 'page allocators' such as memblock or cma will
18027118fc29SMike Kravetz 		 * return a set of pages with each page ref counted.  We need
18037118fc29SMike Kravetz 		 * to turn this set of pages into a compound page with tail
18047118fc29SMike Kravetz 		 * page ref counts set to zero.  Code such as speculative page
18057118fc29SMike Kravetz 		 * cache adding could take a ref on a 'to be' tail page.
18067118fc29SMike Kravetz 		 * We need to respect any increased ref count, and only set
18077118fc29SMike Kravetz 		 * the ref count to zero if count is currently 1.  If count
1808416d85edSMike Kravetz 		 * is not 1, we return an error.  An error return indicates
1809416d85edSMike Kravetz 		 * the set of pages cannot be converted to a gigantic page.
1810416d85edSMike Kravetz 		 * The caller who allocated the pages should then discard the
1811416d85edSMike Kravetz 		 * pages using the appropriate free interface.
181234d9e35bSMike Kravetz 		 *
181334d9e35bSMike Kravetz 		 * In the case of demote, the ref count will be zero.
18147118fc29SMike Kravetz 		 */
181534d9e35bSMike Kravetz 		if (!demote) {
18167118fc29SMike Kravetz 			if (!page_ref_freeze(p, 1)) {
1817416d85edSMike Kravetz 				pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
18187118fc29SMike Kravetz 				goto out_error;
18197118fc29SMike Kravetz 			}
182034d9e35bSMike Kravetz 		} else {
182134d9e35bSMike Kravetz 			VM_BUG_ON_PAGE(page_count(p), p);
182234d9e35bSMike Kravetz 		}
18231d798ca3SKirill A. Shutemov 		set_compound_head(p, page);
182420a0307cSWu Fengguang 	}
1825b4330afbSMike Kravetz 	atomic_set(compound_mapcount_ptr(page), -1);
182647e29d32SJohn Hubbard 	atomic_set(compound_pincount_ptr(page), 0);
18277118fc29SMike Kravetz 	return true;
18287118fc29SMike Kravetz 
18297118fc29SMike Kravetz out_error:
18307118fc29SMike Kravetz 	/* undo tail page modifications made above */
183114455eabSCheng Li 	for (j = 1; j < i; j++) {
183214455eabSCheng Li 		p = nth_page(page, j);
18337118fc29SMike Kravetz 		clear_compound_head(p);
18347118fc29SMike Kravetz 		set_page_refcounted(p);
18357118fc29SMike Kravetz 	}
18367118fc29SMike Kravetz 	/* need to clear PG_reserved on remaining tail pages  */
183714455eabSCheng Li 	for (; j < nr_pages; j++) {
183814455eabSCheng Li 		p = nth_page(page, j);
18397118fc29SMike Kravetz 		__ClearPageReserved(p);
184014455eabSCheng Li 	}
18417118fc29SMike Kravetz 	set_compound_order(page, 0);
18425232c63fSMatthew Wilcox (Oracle) #ifdef CONFIG_64BIT
18437118fc29SMike Kravetz 	page[1].compound_nr = 0;
18445232c63fSMatthew Wilcox (Oracle) #endif
18457118fc29SMike Kravetz 	__ClearPageHead(page);
18467118fc29SMike Kravetz 	return false;
184720a0307cSWu Fengguang }
184820a0307cSWu Fengguang 
184934d9e35bSMike Kravetz static bool prep_compound_gigantic_page(struct page *page, unsigned int order)
185034d9e35bSMike Kravetz {
185134d9e35bSMike Kravetz 	return __prep_compound_gigantic_page(page, order, false);
185234d9e35bSMike Kravetz }
185334d9e35bSMike Kravetz 
18548531fc6fSMike Kravetz static bool prep_compound_gigantic_page_for_demote(struct page *page,
18558531fc6fSMike Kravetz 							unsigned int order)
18568531fc6fSMike Kravetz {
18578531fc6fSMike Kravetz 	return __prep_compound_gigantic_page(page, order, true);
18588531fc6fSMike Kravetz }
18598531fc6fSMike Kravetz 
18607795912cSAndrew Morton /*
18617795912cSAndrew Morton  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
18627795912cSAndrew Morton  * transparent huge pages.  See the PageTransHuge() documentation for more
18637795912cSAndrew Morton  * details.
18647795912cSAndrew Morton  */
186520a0307cSWu Fengguang int PageHuge(struct page *page)
186620a0307cSWu Fengguang {
186720a0307cSWu Fengguang 	if (!PageCompound(page))
186820a0307cSWu Fengguang 		return 0;
186920a0307cSWu Fengguang 
187020a0307cSWu Fengguang 	page = compound_head(page);
1871f1e61557SKirill A. Shutemov 	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
187220a0307cSWu Fengguang }
187343131e14SNaoya Horiguchi EXPORT_SYMBOL_GPL(PageHuge);
187443131e14SNaoya Horiguchi 
187527c73ae7SAndrea Arcangeli /*
187627c73ae7SAndrea Arcangeli  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
187727c73ae7SAndrea Arcangeli  * normal or transparent huge pages.
187827c73ae7SAndrea Arcangeli  */
187927c73ae7SAndrea Arcangeli int PageHeadHuge(struct page *page_head)
188027c73ae7SAndrea Arcangeli {
188127c73ae7SAndrea Arcangeli 	if (!PageHead(page_head))
188227c73ae7SAndrea Arcangeli 		return 0;
188327c73ae7SAndrea Arcangeli 
1884d4af73e3SVlastimil Babka 	return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
188527c73ae7SAndrea Arcangeli }
18864e936eccSDavid Howells EXPORT_SYMBOL_GPL(PageHeadHuge);
188727c73ae7SAndrea Arcangeli 
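/*
 * Illustrative sketch (hypothetical caller): PageHuge() answers for any
 * sub-page of a hugetlbfs page, while PageHeadHuge() only answers for the
 * head page, so a caller that may hold a tail page typically does:
 *
 *	if (PageHuge(page)) {
 *		struct page *head = compound_head(page);
 *		// operate on the hugetlb head page
 *	}
 *
 * Transparent huge pages return false from both helpers.
 */
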
1888c0d0381aSMike Kravetz /*
1889c0d0381aSMike Kravetz  * Find and lock address space (mapping) in write mode.
1890c0d0381aSMike Kravetz  *
1891336bf30eSMike Kravetz  * Upon entry, the page is locked, which means that page_mapping() is
1892336bf30eSMike Kravetz  * stable.  Due to locking order, we can only trylock_write.  If we
1893336bf30eSMike Kravetz  * cannot get the lock, simply return NULL to the caller.
1894c0d0381aSMike Kravetz  */
1895c0d0381aSMike Kravetz struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
1896c0d0381aSMike Kravetz {
1897336bf30eSMike Kravetz 	struct address_space *mapping = page_mapping(hpage);
1898c0d0381aSMike Kravetz 
1899c0d0381aSMike Kravetz 	if (!mapping)
1900c0d0381aSMike Kravetz 		return mapping;
1901c0d0381aSMike Kravetz 
1902c0d0381aSMike Kravetz 	if (i_mmap_trylock_write(mapping))
1903c0d0381aSMike Kravetz 		return mapping;
1904c0d0381aSMike Kravetz 
1905c0d0381aSMike Kravetz 	return NULL;
1906c0d0381aSMike Kravetz }
1907c0d0381aSMike Kravetz 
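/*
 * Illustrative sketch (hypothetical caller): with the page lock held,
 * try for the i_mmap lock and cope with failure:
 *
 *	mapping = hugetlb_page_mapping_lock_write(hpage);
 *	if (mapping) {
 *		// ... walk the rmap under i_mmap_rwsem held for write ...
 *		i_mmap_unlock_write(mapping);
 *	} else {
 *		// lock could not be taken; fall back or retry
 *	}
 */
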
1908fe19bd3dSHugh Dickins pgoff_t hugetlb_basepage_index(struct page *page)
190913d60f4bSZhang Yi {
191013d60f4bSZhang Yi 	struct page *page_head = compound_head(page);
191113d60f4bSZhang Yi 	pgoff_t index = page_index(page_head);
191213d60f4bSZhang Yi 	unsigned long compound_idx;
191313d60f4bSZhang Yi 
191413d60f4bSZhang Yi 	if (compound_order(page_head) >= MAX_ORDER)
191513d60f4bSZhang Yi 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
191613d60f4bSZhang Yi 	else
191713d60f4bSZhang Yi 		compound_idx = page - page_head;
191813d60f4bSZhang Yi 
191913d60f4bSZhang Yi 	return (index << compound_order(page_head)) + compound_idx;
192013d60f4bSZhang Yi }
192113d60f4bSZhang Yi 
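/*
 * Worked example (illustrative numbers): for a 2MB hugetlb page of
 * compound order 9 (512 base pages) whose head page has page_index 3,
 * the base page located 5 pages into the compound page yields
 *
 *	(3 << 9) + 5 = 1541
 *
 * i.e. the index of that base page within the mapping, in base-page units.
 */
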
19220c397daeSMichal Hocko static struct page *alloc_buddy_huge_page(struct hstate *h,
1923f60858f9SMike Kravetz 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1924f60858f9SMike Kravetz 		nodemask_t *node_alloc_noretry)
19251da177e4SLinus Torvalds {
1926af0fb9dfSMichal Hocko 	int order = huge_page_order(h);
19271da177e4SLinus Torvalds 	struct page *page;
1928f60858f9SMike Kravetz 	bool alloc_try_hard = true;
1929f96efd58SJoe Jin 
1930f60858f9SMike Kravetz 	/*
1931f60858f9SMike Kravetz 	 * By default we always try hard to allocate the page with
1932f60858f9SMike Kravetz 	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
1933f60858f9SMike Kravetz 	 * a loop (to adjust global huge page counts) and previous allocation
1934f60858f9SMike Kravetz 	 * failed, do not continue to try hard on the same node.  Use the
1935f60858f9SMike Kravetz 	 * node_alloc_noretry bitmap to manage this state information.
1936f60858f9SMike Kravetz 	 */
1937f60858f9SMike Kravetz 	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1938f60858f9SMike Kravetz 		alloc_try_hard = false;
1939f60858f9SMike Kravetz 	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1940f60858f9SMike Kravetz 	if (alloc_try_hard)
1941f60858f9SMike Kravetz 		gfp_mask |= __GFP_RETRY_MAYFAIL;
1942af0fb9dfSMichal Hocko 	if (nid == NUMA_NO_NODE)
1943af0fb9dfSMichal Hocko 		nid = numa_mem_id();
194484172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp_mask, order, nid, nmask);
1945af0fb9dfSMichal Hocko 	if (page)
1946af0fb9dfSMichal Hocko 		__count_vm_event(HTLB_BUDDY_PGALLOC);
1947af0fb9dfSMichal Hocko 	else
1948af0fb9dfSMichal Hocko 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
194963b4613cSNishanth Aravamudan 
1950f60858f9SMike Kravetz 	/*
1951f60858f9SMike Kravetz 	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page, this
1952f60858f9SMike Kravetz 	 * indicates an overall state change.  Clear the bit so that we resume
1953f60858f9SMike Kravetz 	 * normal 'try hard' allocations.
1954f60858f9SMike Kravetz 	 */
1955f60858f9SMike Kravetz 	if (node_alloc_noretry && page && !alloc_try_hard)
1956f60858f9SMike Kravetz 		node_clear(nid, *node_alloc_noretry);
1957f60858f9SMike Kravetz 
1958f60858f9SMike Kravetz 	/*
1959f60858f9SMike Kravetz 	 * If we tried hard to get a page but failed, set bit so that
1960f60858f9SMike Kravetz 	 * subsequent attempts will not try as hard until there is an
1961f60858f9SMike Kravetz 	 * overall state change.
1962f60858f9SMike Kravetz 	 */
1963f60858f9SMike Kravetz 	if (node_alloc_noretry && !page && alloc_try_hard)
1964f60858f9SMike Kravetz 		node_set(nid, *node_alloc_noretry);
1965f60858f9SMike Kravetz 
196663b4613cSNishanth Aravamudan 	return page;
196763b4613cSNishanth Aravamudan }
196863b4613cSNishanth Aravamudan 
1969af0fb9dfSMichal Hocko /*
19700c397daeSMichal Hocko  * Common helper to allocate a fresh hugetlb page. All specific allocators
19710c397daeSMichal Hocko  * should use this function to get new hugetlb pages
19720c397daeSMichal Hocko  */
19730c397daeSMichal Hocko static struct page *alloc_fresh_huge_page(struct hstate *h,
1974f60858f9SMike Kravetz 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1975f60858f9SMike Kravetz 		nodemask_t *node_alloc_noretry)
19760c397daeSMichal Hocko {
19770c397daeSMichal Hocko 	struct page *page;
19787118fc29SMike Kravetz 	bool retry = false;
19790c397daeSMichal Hocko 
19807118fc29SMike Kravetz retry:
19810c397daeSMichal Hocko 	if (hstate_is_gigantic(h))
19820c397daeSMichal Hocko 		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
19830c397daeSMichal Hocko 	else
19840c397daeSMichal Hocko 		page = alloc_buddy_huge_page(h, gfp_mask,
1985f60858f9SMike Kravetz 				nid, nmask, node_alloc_noretry);
19860c397daeSMichal Hocko 	if (!page)
19870c397daeSMichal Hocko 		return NULL;
19880c397daeSMichal Hocko 
19897118fc29SMike Kravetz 	if (hstate_is_gigantic(h)) {
19907118fc29SMike Kravetz 		if (!prep_compound_gigantic_page(page, huge_page_order(h))) {
19917118fc29SMike Kravetz 			/*
19927118fc29SMike Kravetz 			 * Rare failure to convert pages to compound page.
19937118fc29SMike Kravetz 			 * Free pages and try again - ONCE!
19947118fc29SMike Kravetz 			 */
19957118fc29SMike Kravetz 			free_gigantic_page(page, huge_page_order(h));
19967118fc29SMike Kravetz 			if (!retry) {
19977118fc29SMike Kravetz 				retry = true;
19987118fc29SMike Kravetz 				goto retry;
19997118fc29SMike Kravetz 			}
20007118fc29SMike Kravetz 			return NULL;
20017118fc29SMike Kravetz 		}
20027118fc29SMike Kravetz 	}
20030c397daeSMichal Hocko 	prep_new_huge_page(h, page, page_to_nid(page));
20040c397daeSMichal Hocko 
20050c397daeSMichal Hocko 	return page;
20060c397daeSMichal Hocko }
20070c397daeSMichal Hocko 
20080c397daeSMichal Hocko /*
2009af0fb9dfSMichal Hocko  * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
2010af0fb9dfSMichal Hocko  * manner.
2011af0fb9dfSMichal Hocko  */
2012f60858f9SMike Kravetz static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
2013f60858f9SMike Kravetz 				nodemask_t *node_alloc_noretry)
2014b2261026SJoonsoo Kim {
2015b2261026SJoonsoo Kim 	struct page *page;
2016b2261026SJoonsoo Kim 	int nr_nodes, node;
2017af0fb9dfSMichal Hocko 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2018b2261026SJoonsoo Kim 
2019b2261026SJoonsoo Kim 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2020f60858f9SMike Kravetz 		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
2021f60858f9SMike Kravetz 						node_alloc_noretry);
2022af0fb9dfSMichal Hocko 		if (page)
2023b2261026SJoonsoo Kim 			break;
2024b2261026SJoonsoo Kim 	}
2025b2261026SJoonsoo Kim 
2026af0fb9dfSMichal Hocko 	if (!page)
2027af0fb9dfSMichal Hocko 		return 0;
2028b2261026SJoonsoo Kim 
2029af0fb9dfSMichal Hocko 	put_page(page); /* free it into the hugepage allocator */
2030af0fb9dfSMichal Hocko 
2031af0fb9dfSMichal Hocko 	return 1;
2032b2261026SJoonsoo Kim }
2033b2261026SJoonsoo Kim 
2034e8c5c824SLee Schermerhorn /*
203510c6ec49SMike Kravetz  * Remove a huge page from the pool, taken from the next node to free.  Attempt to keep
203610c6ec49SMike Kravetz  * persistent huge pages more or less balanced over allowed nodes.
203710c6ec49SMike Kravetz  * This routine only 'removes' the hugetlb page.  The caller must make
203810c6ec49SMike Kravetz  * an additional call to free the page to low level allocators.
2039e8c5c824SLee Schermerhorn  * Called with hugetlb_lock locked.
2040e8c5c824SLee Schermerhorn  */
204110c6ec49SMike Kravetz static struct page *remove_pool_huge_page(struct hstate *h,
204210c6ec49SMike Kravetz 						nodemask_t *nodes_allowed,
20436ae11b27SLee Schermerhorn 						 bool acct_surplus)
2044e8c5c824SLee Schermerhorn {
2045b2261026SJoonsoo Kim 	int nr_nodes, node;
204610c6ec49SMike Kravetz 	struct page *page = NULL;
2047e8c5c824SLee Schermerhorn 
20489487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
2049b2261026SJoonsoo Kim 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2050685f3457SLee Schermerhorn 		/*
2051685f3457SLee Schermerhorn 		 * If we're returning unused surplus pages, only examine
2052685f3457SLee Schermerhorn 		 * nodes with surplus pages.
2053685f3457SLee Schermerhorn 		 */
2054b2261026SJoonsoo Kim 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2055b2261026SJoonsoo Kim 		    !list_empty(&h->hugepage_freelists[node])) {
205610c6ec49SMike Kravetz 			page = list_entry(h->hugepage_freelists[node].next,
2057e8c5c824SLee Schermerhorn 					  struct page, lru);
20586eb4e88aSMike Kravetz 			remove_hugetlb_page(h, page, acct_surplus);
20599a76db09SLee Schermerhorn 			break;
2060e8c5c824SLee Schermerhorn 		}
2061b2261026SJoonsoo Kim 	}
2062e8c5c824SLee Schermerhorn 
206310c6ec49SMike Kravetz 	return page;
2064e8c5c824SLee Schermerhorn }
2065e8c5c824SLee Schermerhorn 
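/*
 * Illustrative sketch (hypothetical caller): remove_pool_huge_page() only
 * unlinks the page while holding hugetlb_lock; the actual freeing happens
 * later, outside the lock, roughly:
 *
 *	LIST_HEAD(page_list);
 *
 *	spin_lock_irq(&hugetlb_lock);
 *	page = remove_pool_huge_page(h, nodes_allowed, true);
 *	if (page)
 *		list_add(&page->lru, &page_list);
 *	spin_unlock_irq(&hugetlb_lock);
 *	update_and_free_pages_bulk(h, &page_list);
 *
 * This mirrors the pattern used by return_unused_surplus_pages() later
 * in this file.
 */
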
2066c8721bbbSNaoya Horiguchi /*
2067c8721bbbSNaoya Horiguchi  * Dissolve a given free hugepage into free buddy pages. This function does
2068faf53defSNaoya Horiguchi  * nothing for in-use hugepages and non-hugepages.
2069faf53defSNaoya Horiguchi  * This function returns values like below:
2070faf53defSNaoya Horiguchi  *
2071ad2fa371SMuchun Song  *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
2072ad2fa371SMuchun Song  *           when the system is under memory pressure and the feature of
2073ad2fa371SMuchun Song  *           freeing unused vmemmap pages associated with each hugetlb page
2074ad2fa371SMuchun Song  *           is enabled.
2075faf53defSNaoya Horiguchi  *  -EBUSY:  failed to dissolve free hugepages or the hugepage is in-use
2076faf53defSNaoya Horiguchi  *           (allocated or reserved.)
2077faf53defSNaoya Horiguchi  *       0:  successfully dissolved free hugepages or the page is not a
2078faf53defSNaoya Horiguchi  *           hugepage (considered as already dissolved)
2079c8721bbbSNaoya Horiguchi  */
2080c3114a84SAnshuman Khandual int dissolve_free_huge_page(struct page *page)
2081c8721bbbSNaoya Horiguchi {
20826bc9b564SNaoya Horiguchi 	int rc = -EBUSY;
2083082d5b6bSGerald Schaefer 
20847ffddd49SMuchun Song retry:
2085faf53defSNaoya Horiguchi 	/* Do not disrupt the normal path by needlessly holding hugetlb_lock */
2086faf53defSNaoya Horiguchi 	if (!PageHuge(page))
2087faf53defSNaoya Horiguchi 		return 0;
2088faf53defSNaoya Horiguchi 
2089db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
2090faf53defSNaoya Horiguchi 	if (!PageHuge(page)) {
2091faf53defSNaoya Horiguchi 		rc = 0;
2092faf53defSNaoya Horiguchi 		goto out;
2093faf53defSNaoya Horiguchi 	}
2094faf53defSNaoya Horiguchi 
2095faf53defSNaoya Horiguchi 	if (!page_count(page)) {
20962247bb33SGerald Schaefer 		struct page *head = compound_head(page);
20972247bb33SGerald Schaefer 		struct hstate *h = page_hstate(head);
20986bc9b564SNaoya Horiguchi 		if (h->free_huge_pages - h->resv_huge_pages == 0)
2099082d5b6bSGerald Schaefer 			goto out;
21007ffddd49SMuchun Song 
21017ffddd49SMuchun Song 		/*
21027ffddd49SMuchun Song 		 * We should make sure that the page is already on the free list
21037ffddd49SMuchun Song 		 * when it is dissolved.
21047ffddd49SMuchun Song 		 */
21056c037149SMike Kravetz 		if (unlikely(!HPageFreed(head))) {
2106db71ef79SMike Kravetz 			spin_unlock_irq(&hugetlb_lock);
21077ffddd49SMuchun Song 			cond_resched();
21087ffddd49SMuchun Song 
21097ffddd49SMuchun Song 			/*
21107ffddd49SMuchun Song 			 * Theoretically, we should return -EBUSY when we
21117ffddd49SMuchun Song 			 * encounter this race.  In practice, the race window
21127ffddd49SMuchun Song 			 * is quite small, so retrying gives us a good chance
21137ffddd49SMuchun Song 			 * of dissolving the page successfully.  Seizing this
21147ffddd49SMuchun Song 			 * opportunity improves the overall success rate of
21157ffddd49SMuchun Song 			 * dissolving pages.
21167ffddd49SMuchun Song 			 */
21177ffddd49SMuchun Song 			goto retry;
21187ffddd49SMuchun Song 		}
21197ffddd49SMuchun Song 
2120ad2fa371SMuchun Song 		remove_hugetlb_page(h, head, false);
2121ad2fa371SMuchun Song 		h->max_huge_pages--;
2122ad2fa371SMuchun Song 		spin_unlock_irq(&hugetlb_lock);
2123ad2fa371SMuchun Song 
2124c3114a84SAnshuman Khandual 		/*
2125ad2fa371SMuchun Song 		 * Normally update_and_free_page will allocate required vmemmap
2126ad2fa371SMuchun Song 		 * before freeing the page.  update_and_free_page will fail to
2127ad2fa371SMuchun Song 		 * free the page if it can not allocate required vmemmap.  We
2128ad2fa371SMuchun Song 		 * need to adjust max_huge_pages if the page is not freed.
2129ad2fa371SMuchun Song 		 * Attempt to allocate vmemmap here so that we can take
2130ad2fa371SMuchun Song 		 * appropriate action on failure.
2131ad2fa371SMuchun Song 		 */
21326213834cSMuchun Song 		rc = hugetlb_vmemmap_restore(h, head);
2133ad2fa371SMuchun Song 		if (!rc) {
2134b65d4adbSMuchun Song 			update_and_free_page(h, head, false);
2135ad2fa371SMuchun Song 		} else {
2136ad2fa371SMuchun Song 			spin_lock_irq(&hugetlb_lock);
2137ad2fa371SMuchun Song 			add_hugetlb_page(h, head, false);
2138ad2fa371SMuchun Song 			h->max_huge_pages++;
2139ad2fa371SMuchun Song 			spin_unlock_irq(&hugetlb_lock);
2140ad2fa371SMuchun Song 		}
2141ad2fa371SMuchun Song 
2142ad2fa371SMuchun Song 		return rc;
2143c8721bbbSNaoya Horiguchi 	}
2144082d5b6bSGerald Schaefer out:
2145db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
2146082d5b6bSGerald Schaefer 	return rc;
2147c8721bbbSNaoya Horiguchi }
2148c8721bbbSNaoya Horiguchi 
2149c8721bbbSNaoya Horiguchi /*
2150c8721bbbSNaoya Horiguchi  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2151c8721bbbSNaoya Horiguchi  * make specified memory blocks removable from the system.
21522247bb33SGerald Schaefer  * Note that this will dissolve a free gigantic hugepage completely, if any
21532247bb33SGerald Schaefer  * part of it lies within the given range.
2154082d5b6bSGerald Schaefer  * Also note that if dissolve_free_huge_page() returns with an error, all
2155082d5b6bSGerald Schaefer  * free hugepages that were dissolved before that error are lost.
2156c8721bbbSNaoya Horiguchi  */
2157082d5b6bSGerald Schaefer int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
2158c8721bbbSNaoya Horiguchi {
2159c8721bbbSNaoya Horiguchi 	unsigned long pfn;
2160eb03aa00SGerald Schaefer 	struct page *page;
2161082d5b6bSGerald Schaefer 	int rc = 0;
2162dc2628f3SMuchun Song 	unsigned int order;
2163dc2628f3SMuchun Song 	struct hstate *h;
2164c8721bbbSNaoya Horiguchi 
2165d0177639SLi Zhong 	if (!hugepages_supported())
2166082d5b6bSGerald Schaefer 		return rc;
2167d0177639SLi Zhong 
2168dc2628f3SMuchun Song 	order = huge_page_order(&default_hstate);
2169dc2628f3SMuchun Song 	for_each_hstate(h)
2170dc2628f3SMuchun Song 		order = min(order, huge_page_order(h));
2171dc2628f3SMuchun Song 
2172dc2628f3SMuchun Song 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2173eb03aa00SGerald Schaefer 		page = pfn_to_page(pfn);
2174eb03aa00SGerald Schaefer 		rc = dissolve_free_huge_page(page);
2175eb03aa00SGerald Schaefer 		if (rc)
2176082d5b6bSGerald Schaefer 			break;
2177eb03aa00SGerald Schaefer 	}
2178082d5b6bSGerald Schaefer 
2179082d5b6bSGerald Schaefer 	return rc;
2180c8721bbbSNaoya Horiguchi }
2181c8721bbbSNaoya Horiguchi 
2182ab5ac90aSMichal Hocko /*
2183ab5ac90aSMichal Hocko  * Allocates a fresh surplus page from the page allocator.
2184ab5ac90aSMichal Hocko  */
21850c397daeSMichal Hocko static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
2186b65a4edaSMike Kravetz 		int nid, nodemask_t *nmask, bool zero_ref)
21877893d1d5SAdam Litke {
21889980d744SMichal Hocko 	struct page *page = NULL;
2189b65a4edaSMike Kravetz 	bool retry = false;
21907893d1d5SAdam Litke 
2191bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
2192aa888a74SAndi Kleen 		return NULL;
2193aa888a74SAndi Kleen 
2194db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
21959980d744SMichal Hocko 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
21969980d744SMichal Hocko 		goto out_unlock;
2197db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
2198d1c3fb1fSNishanth Aravamudan 
2199b65a4edaSMike Kravetz retry:
2200f60858f9SMike Kravetz 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
22019980d744SMichal Hocko 	if (!page)
22020c397daeSMichal Hocko 		return NULL;
2203d1c3fb1fSNishanth Aravamudan 
2204db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
22059980d744SMichal Hocko 	/*
22069980d744SMichal Hocko 	 * We could have raced with the pool size change.
22079980d744SMichal Hocko 	 * Double check that and simply deallocate the new page
22089980d744SMichal Hocko 	 * if we would end up overcommitting the surpluses. Abuse the
22099980d744SMichal Hocko 	 * temporary page flag to work around the nasty free_huge_page
22109980d744SMichal Hocko 	 * codeflow.
22119980d744SMichal Hocko 	 */
22129980d744SMichal Hocko 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
22139157c311SMike Kravetz 		SetHPageTemporary(page);
2214db71ef79SMike Kravetz 		spin_unlock_irq(&hugetlb_lock);
22159980d744SMichal Hocko 		put_page(page);
22162bf753e6SKai Shen 		return NULL;
2217b65a4edaSMike Kravetz 	}
2218b65a4edaSMike Kravetz 
2219b65a4edaSMike Kravetz 	if (zero_ref) {
2220b65a4edaSMike Kravetz 		/*
2221b65a4edaSMike Kravetz 		 * Caller requires a page with zero ref count.
2222b65a4edaSMike Kravetz 		 * We will drop ref count here.  If someone else is holding
2223b65a4edaSMike Kravetz 		 * a ref, the page will be freed when they drop it.  Abuse
2224b65a4edaSMike Kravetz 		 * temporary page flag to accomplish this.
2225b65a4edaSMike Kravetz 		 */
2226b65a4edaSMike Kravetz 		SetHPageTemporary(page);
2227b65a4edaSMike Kravetz 		if (!put_page_testzero(page)) {
2228b65a4edaSMike Kravetz 			/*
2229b65a4edaSMike Kravetz 			 * Unexpected inflated ref count on freshly allocated
2230b65a4edaSMike Kravetz 			 * huge page.  Retry once.
2231b65a4edaSMike Kravetz 			 */
2232b65a4edaSMike Kravetz 			pr_info("HugeTLB unexpected inflated ref count on freshly allocated page\n");
2233b65a4edaSMike Kravetz 			spin_unlock_irq(&hugetlb_lock);
2234b65a4edaSMike Kravetz 			if (retry)
2235b65a4edaSMike Kravetz 				return NULL;
2236b65a4edaSMike Kravetz 
2237b65a4edaSMike Kravetz 			retry = true;
2238b65a4edaSMike Kravetz 			goto retry;
2239b65a4edaSMike Kravetz 		}
2240b65a4edaSMike Kravetz 		ClearHPageTemporary(page);
2241b65a4edaSMike Kravetz 	}
2242b65a4edaSMike Kravetz 
22439980d744SMichal Hocko 	h->surplus_huge_pages++;
22444704dea3SMichal Hocko 	h->surplus_huge_pages_node[page_to_nid(page)]++;
22459980d744SMichal Hocko 
22469980d744SMichal Hocko out_unlock:
2247db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
22487893d1d5SAdam Litke 
22497893d1d5SAdam Litke 	return page;
22507893d1d5SAdam Litke }
22517893d1d5SAdam Litke 
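/*
 * Allocate a fresh huge page for migration.  The page is marked
 * HPageTemporary: it is not accounted as surplus and is freed back to
 * the buddy allocator when the last reference is dropped.
 */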
2252bbe88753SJoonsoo Kim static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
2253ab5ac90aSMichal Hocko 				     int nid, nodemask_t *nmask)
2254ab5ac90aSMichal Hocko {
2255ab5ac90aSMichal Hocko 	struct page *page;
2256ab5ac90aSMichal Hocko 
2257ab5ac90aSMichal Hocko 	if (hstate_is_gigantic(h))
2258ab5ac90aSMichal Hocko 		return NULL;
2259ab5ac90aSMichal Hocko 
2260f60858f9SMike Kravetz 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
2261ab5ac90aSMichal Hocko 	if (!page)
2262ab5ac90aSMichal Hocko 		return NULL;
2263ab5ac90aSMichal Hocko 
2264ab5ac90aSMichal Hocko 	/*
2265ab5ac90aSMichal Hocko 	 * We do not account these pages as surplus because they are only
2266ab5ac90aSMichal Hocko 	 * temporary and will be released properly on the last reference
2267ab5ac90aSMichal Hocko 	 */
22689157c311SMike Kravetz 	SetHPageTemporary(page);
2269ab5ac90aSMichal Hocko 
2270ab5ac90aSMichal Hocko 	return page;
2271ab5ac90aSMichal Hocko }
2272ab5ac90aSMichal Hocko 
2273e4e574b7SAdam Litke /*
2274099730d6SDave Hansen  * Use the VMA's mpolicy to allocate a huge page from the buddy.
2275099730d6SDave Hansen  */
2276e0ec90eeSDave Hansen static
22770c397daeSMichal Hocko struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
2278099730d6SDave Hansen 		struct vm_area_struct *vma, unsigned long addr)
2279099730d6SDave Hansen {
2280cfcaa66fSBen Widawsky 	struct page *page = NULL;
2281aaf14e40SMichal Hocko 	struct mempolicy *mpol;
2282aaf14e40SMichal Hocko 	gfp_t gfp_mask = htlb_alloc_mask(h);
2283aaf14e40SMichal Hocko 	int nid;
2284aaf14e40SMichal Hocko 	nodemask_t *nodemask;
2285aaf14e40SMichal Hocko 
2286aaf14e40SMichal Hocko 	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2287cfcaa66fSBen Widawsky 	if (mpol_is_preferred_many(mpol)) {
2288cfcaa66fSBen Widawsky 		gfp_t gfp = gfp_mask | __GFP_NOWARN;
2289cfcaa66fSBen Widawsky 
2290cfcaa66fSBen Widawsky 		gfp &=  ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2291cfcaa66fSBen Widawsky 		page = alloc_surplus_huge_page(h, gfp, nid, nodemask, false);
2292cfcaa66fSBen Widawsky 
2293cfcaa66fSBen Widawsky 		/* Fall back to all nodes if page==NULL */
2294cfcaa66fSBen Widawsky 		nodemask = NULL;
2295cfcaa66fSBen Widawsky 	}
2296cfcaa66fSBen Widawsky 
2297cfcaa66fSBen Widawsky 	if (!page)
2298b65a4edaSMike Kravetz 		page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false);
2299aaf14e40SMichal Hocko 	mpol_cond_put(mpol);
2300aaf14e40SMichal Hocko 	return page;
2301099730d6SDave Hansen }
2302099730d6SDave Hansen 
2303ab5ac90aSMichal Hocko /* page migration callback function */
23043e59fcb0SMichal Hocko struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
2305d92bbc27SJoonsoo Kim 		nodemask_t *nmask, gfp_t gfp_mask)
23064db9b2efSMichal Hocko {
2307db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
23084db9b2efSMichal Hocko 	if (h->free_huge_pages - h->resv_huge_pages > 0) {
23093e59fcb0SMichal Hocko 		struct page *page;
23103e59fcb0SMichal Hocko 
23113e59fcb0SMichal Hocko 		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
23123e59fcb0SMichal Hocko 		if (page) {
2313db71ef79SMike Kravetz 			spin_unlock_irq(&hugetlb_lock);
23143e59fcb0SMichal Hocko 			return page;
23154db9b2efSMichal Hocko 		}
23164db9b2efSMichal Hocko 	}
2317db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
23184db9b2efSMichal Hocko 
23190c397daeSMichal Hocko 	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
23204db9b2efSMichal Hocko }
23214db9b2efSMichal Hocko 
2322ebd63723SMichal Hocko /* mempolicy aware migration callback */
2323389c8178SMichal Hocko struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
2324389c8178SMichal Hocko 		unsigned long address)
2325ebd63723SMichal Hocko {
2326ebd63723SMichal Hocko 	struct mempolicy *mpol;
2327ebd63723SMichal Hocko 	nodemask_t *nodemask;
2328ebd63723SMichal Hocko 	struct page *page;
2329ebd63723SMichal Hocko 	gfp_t gfp_mask;
2330ebd63723SMichal Hocko 	int node;
2331ebd63723SMichal Hocko 
2332ebd63723SMichal Hocko 	gfp_mask = htlb_alloc_mask(h);
2333ebd63723SMichal Hocko 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
2334d92bbc27SJoonsoo Kim 	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
2335ebd63723SMichal Hocko 	mpol_cond_put(mpol);
2336ebd63723SMichal Hocko 
2337ebd63723SMichal Hocko 	return page;
2338ebd63723SMichal Hocko }
2339ebd63723SMichal Hocko 
2340bf50bab2SNaoya Horiguchi /*
234125985edcSLucas De Marchi  * Increase the hugetlb pool such that it can accommodate a reservation
2342e4e574b7SAdam Litke  * of size 'delta'.
2343e4e574b7SAdam Litke  */
23440a4f3d1bSLiu Xiang static int gather_surplus_pages(struct hstate *h, long delta)
23451b2a1e7bSJules Irenge 	__must_hold(&hugetlb_lock)
2346e4e574b7SAdam Litke {
234734665341SMiaohe Lin 	LIST_HEAD(surplus_list);
2348e4e574b7SAdam Litke 	struct page *page, *tmp;
23490a4f3d1bSLiu Xiang 	int ret;
23500a4f3d1bSLiu Xiang 	long i;
23510a4f3d1bSLiu Xiang 	long needed, allocated;
235228073b02SHillf Danton 	bool alloc_ok = true;
2353e4e574b7SAdam Litke 
23549487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
2355a5516438SAndi Kleen 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2356ac09b3a1SAdam Litke 	if (needed <= 0) {
2357a5516438SAndi Kleen 		h->resv_huge_pages += delta;
2358e4e574b7SAdam Litke 		return 0;
2359ac09b3a1SAdam Litke 	}
2360e4e574b7SAdam Litke 
2361e4e574b7SAdam Litke 	allocated = 0;
2362e4e574b7SAdam Litke 
2363e4e574b7SAdam Litke 	ret = -ENOMEM;
2364e4e574b7SAdam Litke retry:
2365db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
2366e4e574b7SAdam Litke 	for (i = 0; i < needed; i++) {
23670c397daeSMichal Hocko 		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
2368b65a4edaSMike Kravetz 				NUMA_NO_NODE, NULL, true);
236928073b02SHillf Danton 		if (!page) {
237028073b02SHillf Danton 			alloc_ok = false;
237128073b02SHillf Danton 			break;
237228073b02SHillf Danton 		}
2373e4e574b7SAdam Litke 		list_add(&page->lru, &surplus_list);
237469ed779aSDavid Rientjes 		cond_resched();
2375e4e574b7SAdam Litke 	}
237628073b02SHillf Danton 	allocated += i;
2377e4e574b7SAdam Litke 
2378e4e574b7SAdam Litke 	/*
2379e4e574b7SAdam Litke 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
2380e4e574b7SAdam Litke 	 * because either resv_huge_pages or free_huge_pages may have changed.
2381e4e574b7SAdam Litke 	 */
2382db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
2383a5516438SAndi Kleen 	needed = (h->resv_huge_pages + delta) -
2384a5516438SAndi Kleen 			(h->free_huge_pages + allocated);
238528073b02SHillf Danton 	if (needed > 0) {
238628073b02SHillf Danton 		if (alloc_ok)
2387e4e574b7SAdam Litke 			goto retry;
238828073b02SHillf Danton 		/*
238928073b02SHillf Danton 		 * We were not able to allocate enough pages to
239028073b02SHillf Danton 		 * satisfy the entire reservation so we free what
239128073b02SHillf Danton 		 * we've allocated so far.
239228073b02SHillf Danton 		 */
239328073b02SHillf Danton 		goto free;
239428073b02SHillf Danton 	}
2395e4e574b7SAdam Litke 	/*
2396e4e574b7SAdam Litke 	 * The surplus_list now contains _at_least_ the number of extra pages
239725985edcSLucas De Marchi 	 * needed to accommodate the reservation.  Add the appropriate number
2398e4e574b7SAdam Litke 	 * of pages to the hugetlb pool and free the extras back to the buddy
2399ac09b3a1SAdam Litke 	 * allocator.  Commit the entire reservation here to prevent another
2400ac09b3a1SAdam Litke 	 * process from stealing the pages as they are added to the pool but
2401ac09b3a1SAdam Litke 	 * before they are reserved.
2402e4e574b7SAdam Litke 	 */
2403e4e574b7SAdam Litke 	needed += allocated;
2404a5516438SAndi Kleen 	h->resv_huge_pages += delta;
2405e4e574b7SAdam Litke 	ret = 0;
2406a9869b83SNaoya Horiguchi 
240719fc3f0aSAdam Litke 	/* Free the needed pages to the hugetlb pool */
240819fc3f0aSAdam Litke 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
240919fc3f0aSAdam Litke 		if ((--needed) < 0)
241019fc3f0aSAdam Litke 			break;
2411b65a4edaSMike Kravetz 		/* Add the page to the hugetlb allocator */
2412a5516438SAndi Kleen 		enqueue_huge_page(h, page);
241319fc3f0aSAdam Litke 	}
241428073b02SHillf Danton free:
2415db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
241619fc3f0aSAdam Litke 
2417b65a4edaSMike Kravetz 	/*
2418b65a4edaSMike Kravetz 	 * Free unnecessary surplus pages to the buddy allocator.
2419b65a4edaSMike Kravetz 	 * Pages have no ref count, call free_huge_page directly.
2420b65a4edaSMike Kravetz 	 */
2421c0d934baSJoonsoo Kim 	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
2422b65a4edaSMike Kravetz 		free_huge_page(page);
2423db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
2424e4e574b7SAdam Litke 
2425e4e574b7SAdam Litke 	return ret;
2426e4e574b7SAdam Litke }
2427e4e574b7SAdam Litke 
2428e4e574b7SAdam Litke /*
2429e5bbc8a6SMike Kravetz  * This routine has two main purposes:
2430e5bbc8a6SMike Kravetz  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2431e5bbc8a6SMike Kravetz  *    in unused_resv_pages.  This corresponds to the prior adjustments made
2432e5bbc8a6SMike Kravetz  *    to the associated reservation map.
2433e5bbc8a6SMike Kravetz  * 2) Free any unused surplus pages that may have been allocated to satisfy
2434e5bbc8a6SMike Kravetz  *    the reservation.  As many as unused_resv_pages may be freed.
2435e4e574b7SAdam Litke  */
2436a5516438SAndi Kleen static void return_unused_surplus_pages(struct hstate *h,
2437a5516438SAndi Kleen 					unsigned long unused_resv_pages)
2438e4e574b7SAdam Litke {
2439e4e574b7SAdam Litke 	unsigned long nr_pages;
244010c6ec49SMike Kravetz 	struct page *page;
244110c6ec49SMike Kravetz 	LIST_HEAD(page_list);
244210c6ec49SMike Kravetz 
24439487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
244410c6ec49SMike Kravetz 	/* Uncommit the reservation */
244510c6ec49SMike Kravetz 	h->resv_huge_pages -= unused_resv_pages;
2446e4e574b7SAdam Litke 
2447c0531714SNaoya Horiguchi 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2448e5bbc8a6SMike Kravetz 		goto out;
2449aa888a74SAndi Kleen 
2450e5bbc8a6SMike Kravetz 	/*
2451e5bbc8a6SMike Kravetz 	 * Part (or even all) of the reservation could have been backed
2452e5bbc8a6SMike Kravetz 	 * by pre-allocated pages. Only free surplus pages.
2453e5bbc8a6SMike Kravetz 	 */
2454a5516438SAndi Kleen 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2455e4e574b7SAdam Litke 
2456685f3457SLee Schermerhorn 	/*
2457685f3457SLee Schermerhorn 	 * We want to release as many surplus pages as possible, spread
24589b5e5d0fSLee Schermerhorn 	 * evenly across all nodes with memory. Iterate across these nodes
24599b5e5d0fSLee Schermerhorn 	 * until we can no longer free unreserved surplus pages. This occurs
24609b5e5d0fSLee Schermerhorn 	 * when the nodes with surplus pages have no free pages.
246110c6ec49SMike Kravetz 	 * remove_pool_huge_page() will balance the freed pages across the
24629b5e5d0fSLee Schermerhorn 	 * on-line nodes with memory and will handle the hstate accounting.
2463685f3457SLee Schermerhorn 	 */
2464685f3457SLee Schermerhorn 	while (nr_pages--) {
246510c6ec49SMike Kravetz 		page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
246610c6ec49SMike Kravetz 		if (!page)
2467e5bbc8a6SMike Kravetz 			goto out;
246810c6ec49SMike Kravetz 
246910c6ec49SMike Kravetz 		list_add(&page->lru, &page_list);
2470e4e574b7SAdam Litke 	}
2471e5bbc8a6SMike Kravetz 
2472e5bbc8a6SMike Kravetz out:
2473db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
247410c6ec49SMike Kravetz 	update_and_free_pages_bulk(h, &page_list);
2475db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
2476e4e574b7SAdam Litke }
2477e4e574b7SAdam Litke 
24785e911373SMike Kravetz 
2479c37f9fb1SAndy Whitcroft /*
2480feba16e2SMike Kravetz  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
24815e911373SMike Kravetz  * are used by the huge page allocation routines to manage reservations.
2482cf3ad20bSMike Kravetz  *
2483cf3ad20bSMike Kravetz  * vma_needs_reservation is called to determine if the huge page at addr
2484cf3ad20bSMike Kravetz  * within the vma has an associated reservation.  If a reservation is
2485cf3ad20bSMike Kravetz  * needed, the value 1 is returned.  The caller is then responsible for
2486cf3ad20bSMike Kravetz  * managing the global reservation and subpool usage counts.  After
2487cf3ad20bSMike Kravetz  * the huge page has been allocated, vma_commit_reservation is called
2488feba16e2SMike Kravetz  * to add the page to the reservation map.  If the page allocation fails,
2489feba16e2SMike Kravetz  * the reservation must be ended instead of committed.  vma_end_reservation
2490feba16e2SMike Kravetz  * is called in such cases.
2491cf3ad20bSMike Kravetz  *
2492cf3ad20bSMike Kravetz  * In the normal case, vma_commit_reservation returns the same value
2493cf3ad20bSMike Kravetz  * as the preceding vma_needs_reservation call.  The only time this
2494cf3ad20bSMike Kravetz  * is not the case is if a reserve map was changed between calls.  It
2495cf3ad20bSMike Kravetz  * is the responsibility of the caller to notice the difference and
2496cf3ad20bSMike Kravetz  * take appropriate action.
249796b96a96SMike Kravetz  *
249896b96a96SMike Kravetz  * vma_add_reservation is used in error paths where a reservation must
249996b96a96SMike Kravetz  * be restored when a newly allocated huge page must be freed.  It is
250096b96a96SMike Kravetz  * to be called after calling vma_needs_reservation to determine if a
250196b96a96SMike Kravetz  * reservation exists.
2502846be085SMike Kravetz  *
2503846be085SMike Kravetz  * vma_del_reservation is used in error paths where an entry in the reserve
2504846be085SMike Kravetz  * map was created during huge page allocation and must be removed.  It is to
2505846be085SMike Kravetz  * be called after calling vma_needs_reservation to determine if a reservation
2506846be085SMike Kravetz  * exists.
2507c37f9fb1SAndy Whitcroft  */
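/*
 * Illustrative sketch (not taken verbatim from any caller): the typical
 * sequence used by the allocation path below is
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	page = ...allocate a huge page...;
 *	if (page)
 *		vma_commit_reservation(h, vma, addr);
 *	else
 *		vma_end_reservation(h, vma, addr);
 *
 * See alloc_huge_page() and restore_reserve_on_error() for the
 * authoritative usage, including the error-path variants.
 */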
25085e911373SMike Kravetz enum vma_resv_mode {
25095e911373SMike Kravetz 	VMA_NEEDS_RESV,
25105e911373SMike Kravetz 	VMA_COMMIT_RESV,
2511feba16e2SMike Kravetz 	VMA_END_RESV,
251296b96a96SMike Kravetz 	VMA_ADD_RESV,
2513846be085SMike Kravetz 	VMA_DEL_RESV,
25145e911373SMike Kravetz };
2515cf3ad20bSMike Kravetz static long __vma_reservation_common(struct hstate *h,
2516cf3ad20bSMike Kravetz 				struct vm_area_struct *vma, unsigned long addr,
25175e911373SMike Kravetz 				enum vma_resv_mode mode)
2518c37f9fb1SAndy Whitcroft {
25194e35f483SJoonsoo Kim 	struct resv_map *resv;
25204e35f483SJoonsoo Kim 	pgoff_t idx;
2521cf3ad20bSMike Kravetz 	long ret;
25220db9d74eSMina Almasry 	long dummy_out_regions_needed;
2523c37f9fb1SAndy Whitcroft 
25244e35f483SJoonsoo Kim 	resv = vma_resv_map(vma);
25254e35f483SJoonsoo Kim 	if (!resv)
2526c37f9fb1SAndy Whitcroft 		return 1;
2527c37f9fb1SAndy Whitcroft 
25284e35f483SJoonsoo Kim 	idx = vma_hugecache_offset(h, vma, addr);
25295e911373SMike Kravetz 	switch (mode) {
25305e911373SMike Kravetz 	case VMA_NEEDS_RESV:
25310db9d74eSMina Almasry 		ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
25320db9d74eSMina Almasry 		/* We assume that vma_reservation_* routines always operate on
25330db9d74eSMina Almasry 		 * 1 page, and that adding a 1 page entry to the resv map can only
25340db9d74eSMina Almasry 		 * ever require 1 region.
25350db9d74eSMina Almasry 		 */
25360db9d74eSMina Almasry 		VM_BUG_ON(dummy_out_regions_needed != 1);
25375e911373SMike Kravetz 		break;
25385e911373SMike Kravetz 	case VMA_COMMIT_RESV:
2539075a61d0SMina Almasry 		ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
25400db9d74eSMina Almasry 		/* region_add calls of range 1 should never fail. */
25410db9d74eSMina Almasry 		VM_BUG_ON(ret < 0);
25425e911373SMike Kravetz 		break;
2543feba16e2SMike Kravetz 	case VMA_END_RESV:
25440db9d74eSMina Almasry 		region_abort(resv, idx, idx + 1, 1);
25455e911373SMike Kravetz 		ret = 0;
25465e911373SMike Kravetz 		break;
254796b96a96SMike Kravetz 	case VMA_ADD_RESV:
25480db9d74eSMina Almasry 		if (vma->vm_flags & VM_MAYSHARE) {
2549075a61d0SMina Almasry 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
25500db9d74eSMina Almasry 			/* region_add calls of range 1 should never fail. */
25510db9d74eSMina Almasry 			VM_BUG_ON(ret < 0);
25520db9d74eSMina Almasry 		} else {
25530db9d74eSMina Almasry 			region_abort(resv, idx, idx + 1, 1);
255496b96a96SMike Kravetz 			ret = region_del(resv, idx, idx + 1);
255596b96a96SMike Kravetz 		}
255696b96a96SMike Kravetz 		break;
2557846be085SMike Kravetz 	case VMA_DEL_RESV:
2558846be085SMike Kravetz 		if (vma->vm_flags & VM_MAYSHARE) {
2559846be085SMike Kravetz 			region_abort(resv, idx, idx + 1, 1);
2560846be085SMike Kravetz 			ret = region_del(resv, idx, idx + 1);
2561846be085SMike Kravetz 		} else {
2562846be085SMike Kravetz 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2563846be085SMike Kravetz 			/* region_add calls of range 1 should never fail. */
2564846be085SMike Kravetz 			VM_BUG_ON(ret < 0);
2565846be085SMike Kravetz 		}
2566846be085SMike Kravetz 		break;
25675e911373SMike Kravetz 	default:
25685e911373SMike Kravetz 		BUG();
25695e911373SMike Kravetz 	}
257084afd99bSAndy Whitcroft 
2571846be085SMike Kravetz 	if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2572cf3ad20bSMike Kravetz 		return ret;
257367961f9dSMike Kravetz 	/*
2574bf3d12b9SMiaohe Lin 	 * We know private mapping must have HPAGE_RESV_OWNER set.
2575bf3d12b9SMiaohe Lin 	 *
257667961f9dSMike Kravetz 	 * In most cases, reserves always exist for private mappings.
257867961f9dSMike Kravetz 	 * However, a file associated with the mapping could have been
257967961f9dSMike Kravetz 	 * hole punched or truncated after reserves were consumed.
258067961f9dSMike Kravetz 	 * A subsequent fault on such a range will not use reserves.
258067961f9dSMike Kravetz 	 * Subtle - The reserve map for private mappings has the
258167961f9dSMike Kravetz 	 * opposite meaning than that of shared mappings.  If NO
258267961f9dSMike Kravetz 	 * entry is in the reserve map, it means a reservation exists.
258367961f9dSMike Kravetz 	 * If an entry exists in the reserve map, it means the
258467961f9dSMike Kravetz 	 * reservation has already been consumed.  As a result, the
258567961f9dSMike Kravetz 	 * return value of this routine is the opposite of the
258667961f9dSMike Kravetz 	 * value returned from reserve map manipulation routines above.
258767961f9dSMike Kravetz 	 */
2588bf3d12b9SMiaohe Lin 	if (ret > 0)
258967961f9dSMike Kravetz 		return 0;
2590bf3d12b9SMiaohe Lin 	if (ret == 0)
259167961f9dSMike Kravetz 		return 1;
2592bf3d12b9SMiaohe Lin 	return ret;
259384afd99bSAndy Whitcroft }
2594cf3ad20bSMike Kravetz 
2595cf3ad20bSMike Kravetz static long vma_needs_reservation(struct hstate *h,
2596a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long addr)
2597c37f9fb1SAndy Whitcroft {
25985e911373SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2599cf3ad20bSMike Kravetz }
2600c37f9fb1SAndy Whitcroft 
2601cf3ad20bSMike Kravetz static long vma_commit_reservation(struct hstate *h,
2602cf3ad20bSMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
2603cf3ad20bSMike Kravetz {
26045e911373SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
26055e911373SMike Kravetz }
26065e911373SMike Kravetz 
2607feba16e2SMike Kravetz static void vma_end_reservation(struct hstate *h,
26085e911373SMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
26095e911373SMike Kravetz {
2610feba16e2SMike Kravetz 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2611c37f9fb1SAndy Whitcroft }
2612c37f9fb1SAndy Whitcroft 
261396b96a96SMike Kravetz static long vma_add_reservation(struct hstate *h,
261496b96a96SMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
261596b96a96SMike Kravetz {
261696b96a96SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
261796b96a96SMike Kravetz }
261896b96a96SMike Kravetz 
2619846be085SMike Kravetz static long vma_del_reservation(struct hstate *h,
2620846be085SMike Kravetz 			struct vm_area_struct *vma, unsigned long addr)
262196b96a96SMike Kravetz {
2622846be085SMike Kravetz 	return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2623846be085SMike Kravetz }
2624846be085SMike Kravetz 
2625846be085SMike Kravetz /*
2626846be085SMike Kravetz  * This routine is called to restore reservation information on error paths.
2627846be085SMike Kravetz  * It should ONLY be called for pages allocated via alloc_huge_page(), and
2628846be085SMike Kravetz  * the hugetlb mutex should remain held when calling this routine.
2629846be085SMike Kravetz  *
2630846be085SMike Kravetz  * It handles two specific cases:
2631846be085SMike Kravetz  * 1) A reservation was in place and the page consumed the reservation.
2632846be085SMike Kravetz  *    HPageRestoreReserve is set in the page.
2633846be085SMike Kravetz  * 2) No reservation was in place for the page, so HPageRestoreReserve is
2634846be085SMike Kravetz  *    not set.  However, alloc_huge_page always updates the reserve map.
2635846be085SMike Kravetz  *
2636846be085SMike Kravetz  * In case 1, free_huge_page later in the error path will increment the
2637846be085SMike Kravetz  * global reserve count.  But, free_huge_page does not have enough context
2638846be085SMike Kravetz  * to adjust the reservation map.  This case deals primarily with private
2639846be085SMike Kravetz  * mappings.  Adjust the reserve map here to be consistent with global
2640846be085SMike Kravetz  * reserve count adjustments to be made by free_huge_page.  Make sure the
2641846be085SMike Kravetz  * reserve map indicates there is a reservation present.
2642846be085SMike Kravetz  *
2643846be085SMike Kravetz  * In case 2, simply undo reserve map modifications done by alloc_huge_page.
2644846be085SMike Kravetz  */
2645846be085SMike Kravetz void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2646846be085SMike Kravetz 			unsigned long address, struct page *page)
2647846be085SMike Kravetz {
264896b96a96SMike Kravetz 	long rc = vma_needs_reservation(h, vma, address);
264996b96a96SMike Kravetz 
2650846be085SMike Kravetz 	if (HPageRestoreReserve(page)) {
2651846be085SMike Kravetz 		if (unlikely(rc < 0))
265296b96a96SMike Kravetz 			/*
265396b96a96SMike Kravetz 			 * Rare out of memory condition in reserve map
2654d6995da3SMike Kravetz 			 * manipulation.  Clear HPageRestoreReserve so that
265596b96a96SMike Kravetz 			 * global reserve count will not be incremented
265696b96a96SMike Kravetz 			 * by free_huge_page.  This will make it appear
265796b96a96SMike Kravetz 			 * as though the reservation for this page was
265896b96a96SMike Kravetz 			 * consumed.  This may prevent the task from
265996b96a96SMike Kravetz 			 * faulting in the page at a later time.  This
266096b96a96SMike Kravetz 			 * is better than inconsistent global huge page
266196b96a96SMike Kravetz 			 * accounting of reserve counts.
266296b96a96SMike Kravetz 			 */
2663d6995da3SMike Kravetz 			ClearHPageRestoreReserve(page);
2664846be085SMike Kravetz 		else if (rc)
2665846be085SMike Kravetz 			(void)vma_add_reservation(h, vma, address);
2666846be085SMike Kravetz 		else
2667846be085SMike Kravetz 			vma_end_reservation(h, vma, address);
2668846be085SMike Kravetz 	} else {
2669846be085SMike Kravetz 		if (!rc) {
267096b96a96SMike Kravetz 			/*
2671846be085SMike Kravetz 			 * This indicates there is an entry in the reserve map
2672c7b1850dSMike Kravetz 			 * not added by alloc_huge_page.  We know it was added
2673846be085SMike Kravetz 			 * before the alloc_huge_page call, otherwise
2674846be085SMike Kravetz 			 * HPageRestoreReserve would be set on the page.
2675846be085SMike Kravetz 			 * Remove the entry so that a subsequent allocation
2676846be085SMike Kravetz 			 * does not consume a reservation.
267796b96a96SMike Kravetz 			 */
2678846be085SMike Kravetz 			rc = vma_del_reservation(h, vma, address);
2679846be085SMike Kravetz 			if (rc < 0)
2680846be085SMike Kravetz 				/*
2681846be085SMike Kravetz 				 * VERY rare out of memory condition.  Since
2682846be085SMike Kravetz 				 * we can not delete the entry, set
2683846be085SMike Kravetz 				 * HPageRestoreReserve so that the reserve
2684846be085SMike Kravetz 				 * count will be incremented when the page
2685846be085SMike Kravetz 				 * is freed.  This reserve will be consumed
2686846be085SMike Kravetz 				 * on a subsequent allocation.
2687846be085SMike Kravetz 				 */
2688846be085SMike Kravetz 				SetHPageRestoreReserve(page);
2689846be085SMike Kravetz 		} else if (rc < 0) {
2690846be085SMike Kravetz 			/*
2691846be085SMike Kravetz 			 * Rare out of memory condition from
2692846be085SMike Kravetz 			 * vma_needs_reservation call.  Memory allocation is
2693846be085SMike Kravetz 			 * only attempted if a new entry is needed.  Therefore,
2694846be085SMike Kravetz 			 * this implies there is not an entry in the
2695846be085SMike Kravetz 			 * reserve map.
2696846be085SMike Kravetz 			 *
2697846be085SMike Kravetz 			 * For shared mappings, no entry in the map indicates
2698846be085SMike Kravetz 			 * no reservation.  We are done.
2699846be085SMike Kravetz 			 */
2700846be085SMike Kravetz 			if (!(vma->vm_flags & VM_MAYSHARE))
2701846be085SMike Kravetz 				/*
2702846be085SMike Kravetz 				 * For private mappings, no entry indicates
2703846be085SMike Kravetz 				 * a reservation is present.  Since we can
2704846be085SMike Kravetz 				 * not add an entry, set HPageRestoreReserve
2705846be085SMike Kravetz 				 * on the page so reserve count will be
2706846be085SMike Kravetz 				 * incremented when freed.  This reserve will
2707846be085SMike Kravetz 				 * be consumed on a subsequent allocation.
2708846be085SMike Kravetz 				 */
2709846be085SMike Kravetz 				SetHPageRestoreReserve(page);
271096b96a96SMike Kravetz 		} else
2711846be085SMike Kravetz 			/*
2712846be085SMike Kravetz 			 * No reservation present, do nothing
2713846be085SMike Kravetz 			 */
271496b96a96SMike Kravetz 			 vma_end_reservation(h, vma, address);
271596b96a96SMike Kravetz 	}
271696b96a96SMike Kravetz }
271796b96a96SMike Kravetz 
2718369fa227SOscar Salvador /*
2719369fa227SOscar Salvador  * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one
2720369fa227SOscar Salvador  * @h: struct hstate old page belongs to
2721369fa227SOscar Salvador  * @old_page: Old page to dissolve
2722ae37c7ffSOscar Salvador  * @list: List to isolate the page in case we need to
2723369fa227SOscar Salvador  * Returns 0 on success, otherwise negated error.
2724369fa227SOscar Salvador  */
2725ae37c7ffSOscar Salvador static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
2726ae37c7ffSOscar Salvador 					struct list_head *list)
2727369fa227SOscar Salvador {
2728369fa227SOscar Salvador 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2729369fa227SOscar Salvador 	int nid = page_to_nid(old_page);
2730b65a4edaSMike Kravetz 	bool alloc_retry = false;
2731369fa227SOscar Salvador 	struct page *new_page;
2732369fa227SOscar Salvador 	int ret = 0;
2733369fa227SOscar Salvador 
2734369fa227SOscar Salvador 	/*
2735369fa227SOscar Salvador 	 * Before dissolving the page, we need to allocate a new one for the
2736f41f2ed4SMuchun Song 	 * pool to remain stable.  Here, we allocate the page and 'prep' it
2737f41f2ed4SMuchun Song 	 * by doing everything but actually updating counters and adding to
2738f41f2ed4SMuchun Song 	 * the pool.  This simplifies things and lets us do most of the processing
2739f41f2ed4SMuchun Song 	 * under the lock.
2740369fa227SOscar Salvador 	 */
2741b65a4edaSMike Kravetz alloc_retry:
2742369fa227SOscar Salvador 	new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
2743369fa227SOscar Salvador 	if (!new_page)
2744369fa227SOscar Salvador 		return -ENOMEM;
2745b65a4edaSMike Kravetz 	/*
2746b65a4edaSMike Kravetz 	 * If all goes well, this page will be directly added to the free
2747b65a4edaSMike Kravetz 	 * list in the pool.  For this the ref count needs to be zero.
2748b65a4edaSMike Kravetz 	 * Attempt to drop now, and retry once if needed.  It is VERY
2749b65a4edaSMike Kravetz 	 * unlikely there is another ref on the page.
2750b65a4edaSMike Kravetz 	 *
2751b65a4edaSMike Kravetz 	 * If someone else has a reference to the page, it will be freed
2752b65a4edaSMike Kravetz 	 * when they drop their ref.  Abuse temporary page flag to accomplish
2753b65a4edaSMike Kravetz 	 * this.  Retry once if there is an inflated ref count.
2754b65a4edaSMike Kravetz 	 */
2755b65a4edaSMike Kravetz 	SetHPageTemporary(new_page);
2756b65a4edaSMike Kravetz 	if (!put_page_testzero(new_page)) {
2757b65a4edaSMike Kravetz 		if (alloc_retry)
2758b65a4edaSMike Kravetz 			return -EBUSY;
2759b65a4edaSMike Kravetz 
2760b65a4edaSMike Kravetz 		alloc_retry = true;
2761b65a4edaSMike Kravetz 		goto alloc_retry;
2762b65a4edaSMike Kravetz 	}
2763b65a4edaSMike Kravetz 	ClearHPageTemporary(new_page);
2764b65a4edaSMike Kravetz 
2765f41f2ed4SMuchun Song 	__prep_new_huge_page(h, new_page);
2766369fa227SOscar Salvador 
2767369fa227SOscar Salvador retry:
2768369fa227SOscar Salvador 	spin_lock_irq(&hugetlb_lock);
2769369fa227SOscar Salvador 	if (!PageHuge(old_page)) {
2770369fa227SOscar Salvador 		/*
2771369fa227SOscar Salvador 		 * Freed from under us. Drop new_page too.
2772369fa227SOscar Salvador 		 */
2773369fa227SOscar Salvador 		goto free_new;
2774369fa227SOscar Salvador 	} else if (page_count(old_page)) {
2775369fa227SOscar Salvador 		/*
2776ae37c7ffSOscar Salvador 		 * Someone has grabbed the page, try to isolate it here.
2777ae37c7ffSOscar Salvador 		 * Fail with -EBUSY if not possible.
2778369fa227SOscar Salvador 		 */
2779ae37c7ffSOscar Salvador 		spin_unlock_irq(&hugetlb_lock);
27807ce82f4cSMiaohe Lin 		ret = isolate_hugetlb(old_page, list);
2781ae37c7ffSOscar Salvador 		spin_lock_irq(&hugetlb_lock);
2782369fa227SOscar Salvador 		goto free_new;
2783369fa227SOscar Salvador 	} else if (!HPageFreed(old_page)) {
2784369fa227SOscar Salvador 		/*
2785369fa227SOscar Salvador 		 * Page's refcount is 0 but it has not been enqueued in the
2786369fa227SOscar Salvador 		 * freelist yet. Race window is small, so we can succeed here if
2787369fa227SOscar Salvador 		 * we retry.
2788369fa227SOscar Salvador 		 */
2789369fa227SOscar Salvador 		spin_unlock_irq(&hugetlb_lock);
2790369fa227SOscar Salvador 		cond_resched();
2791369fa227SOscar Salvador 		goto retry;
2792369fa227SOscar Salvador 	} else {
2793369fa227SOscar Salvador 		/*
2794369fa227SOscar Salvador 		 * Ok, old_page is still a genuine free hugepage. Remove it from
2795369fa227SOscar Salvador 		 * the freelist and decrease the counters. These will be
2796369fa227SOscar Salvador 		 * incremented again when calling __prep_account_new_huge_page()
2797369fa227SOscar Salvador 		 * and enqueue_huge_page() for new_page. The counters will remain
2798369fa227SOscar Salvador 		 * stable since this happens under the lock.
2799369fa227SOscar Salvador 		 */
2800369fa227SOscar Salvador 		remove_hugetlb_page(h, old_page, false);
2801369fa227SOscar Salvador 
2802369fa227SOscar Salvador 		/*
2803b65a4edaSMike Kravetz 		 * Ref count on new page is already zero as it was dropped
2804b65a4edaSMike Kravetz 		 * earlier.  It can be directly added to the pool free list.
2805369fa227SOscar Salvador 		 */
2806369fa227SOscar Salvador 		__prep_account_new_huge_page(h, nid);
2807369fa227SOscar Salvador 		enqueue_huge_page(h, new_page);
2808369fa227SOscar Salvador 
2809369fa227SOscar Salvador 		/*
2810369fa227SOscar Salvador 		 * Pages have been replaced, we can safely free the old one.
2811369fa227SOscar Salvador 		 */
2812369fa227SOscar Salvador 		spin_unlock_irq(&hugetlb_lock);
2813b65d4adbSMuchun Song 		update_and_free_page(h, old_page, false);
2814369fa227SOscar Salvador 	}
2815369fa227SOscar Salvador 
2816369fa227SOscar Salvador 	return ret;
2817369fa227SOscar Salvador 
2818369fa227SOscar Salvador free_new:
2819369fa227SOscar Salvador 	spin_unlock_irq(&hugetlb_lock);
2820b65a4edaSMike Kravetz 	/* Page has a zero ref count, but needs a ref to be freed */
2821b65a4edaSMike Kravetz 	set_page_refcounted(new_page);
2822b65d4adbSMuchun Song 	update_and_free_page(h, new_page, false);
2823369fa227SOscar Salvador 
2824369fa227SOscar Salvador 	return ret;
2825369fa227SOscar Salvador }
2826369fa227SOscar Salvador 
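/*
 * isolate_or_dissolve_huge_page - either isolate an in-use hugepage onto
 * @list or, if the page is free, replace it with a freshly allocated one
 * and dissolve the original via alloc_and_dissolve_huge_page().
 * Returns 0 on success, -ENOMEM for gigantic pages (or on allocation
 * failure), and -EBUSY if the page could neither be isolated nor
 * replaced.
 */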
2827ae37c7ffSOscar Salvador int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
2828369fa227SOscar Salvador {
2829369fa227SOscar Salvador 	struct hstate *h;
2830369fa227SOscar Salvador 	struct page *head;
2831ae37c7ffSOscar Salvador 	int ret = -EBUSY;
2832369fa227SOscar Salvador 
2833369fa227SOscar Salvador 	/*
2834369fa227SOscar Salvador 	 * The page might have been dissolved from under our feet, so make sure
2835369fa227SOscar Salvador 	 * to carefully check the state under the lock.
2836369fa227SOscar Salvador 	 * Return success when racing as if we dissolved the page ourselves.
2837369fa227SOscar Salvador 	 */
2838369fa227SOscar Salvador 	spin_lock_irq(&hugetlb_lock);
2839369fa227SOscar Salvador 	if (PageHuge(page)) {
2840369fa227SOscar Salvador 		head = compound_head(page);
2841369fa227SOscar Salvador 		h = page_hstate(head);
2842369fa227SOscar Salvador 	} else {
2843369fa227SOscar Salvador 		spin_unlock_irq(&hugetlb_lock);
2844369fa227SOscar Salvador 		return 0;
2845369fa227SOscar Salvador 	}
2846369fa227SOscar Salvador 	spin_unlock_irq(&hugetlb_lock);
2847369fa227SOscar Salvador 
2848369fa227SOscar Salvador 	/*
2849369fa227SOscar Salvador 	 * Fence off gigantic pages as there is a cyclic dependency between
2850369fa227SOscar Salvador 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
2851369fa227SOscar Salvador 	 * of bailing out right away without further retrying.
2852369fa227SOscar Salvador 	 */
2853369fa227SOscar Salvador 	if (hstate_is_gigantic(h))
2854369fa227SOscar Salvador 		return -ENOMEM;
2855369fa227SOscar Salvador 
28567ce82f4cSMiaohe Lin 	if (page_count(head) && !isolate_hugetlb(head, list))
2857ae37c7ffSOscar Salvador 		ret = 0;
2858ae37c7ffSOscar Salvador 	else if (!page_count(head))
2859ae37c7ffSOscar Salvador 		ret = alloc_and_dissolve_huge_page(h, head, list);
2860ae37c7ffSOscar Salvador 
2861ae37c7ffSOscar Salvador 	return ret;
2862369fa227SOscar Salvador }
2863369fa227SOscar Salvador 
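/*
 * alloc_huge_page - allocate a huge page for the given VMA and address,
 * consuming a reservation where one exists and charging the subpool and
 * hugetlb cgroup otherwise.  Returns the page on success or an ERR_PTR
 * (-ENOMEM or -ENOSPC) on failure.
 */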
286470c3547eSMike Kravetz struct page *alloc_huge_page(struct vm_area_struct *vma,
286504f2cbe3SMel Gorman 				    unsigned long addr, int avoid_reserve)
2866348ea204SAdam Litke {
286790481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_vma(vma);
2868a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
2869348ea204SAdam Litke 	struct page *page;
2870d85f69b0SMike Kravetz 	long map_chg, map_commit;
2871d85f69b0SMike Kravetz 	long gbl_chg;
28726d76dcf4SAneesh Kumar K.V 	int ret, idx;
28736d76dcf4SAneesh Kumar K.V 	struct hugetlb_cgroup *h_cg;
287408cf9fafSMina Almasry 	bool deferred_reserve;
28752fc39cecSAdam Litke 
28766d76dcf4SAneesh Kumar K.V 	idx = hstate_index(h);
2877a1e78772SMel Gorman 	/*
2878d85f69b0SMike Kravetz 	 * Examine the region/reserve map to determine if the process
2879d85f69b0SMike Kravetz 	 * has a reservation for the page to be allocated.  A return
2880d85f69b0SMike Kravetz 	 * code of zero indicates a reservation exists (no change).
2881a1e78772SMel Gorman 	 */
2882d85f69b0SMike Kravetz 	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2883d85f69b0SMike Kravetz 	if (map_chg < 0)
288476dcee75SAneesh Kumar K.V 		return ERR_PTR(-ENOMEM);
2885d85f69b0SMike Kravetz 
2886d85f69b0SMike Kravetz 	/*
2887d85f69b0SMike Kravetz 	 * Processes that did not create the mapping will have no
2888d85f69b0SMike Kravetz 	 * reserves as indicated by the region/reserve map. Check
2889d85f69b0SMike Kravetz 	 * that the allocation will not exceed the subpool limit.
2890d85f69b0SMike Kravetz 	 * Allocations for MAP_NORESERVE mappings also need to be
2891d85f69b0SMike Kravetz 	 * checked against any subpool limit.
2892d85f69b0SMike Kravetz 	 */
2893d85f69b0SMike Kravetz 	if (map_chg || avoid_reserve) {
2894d85f69b0SMike Kravetz 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
2895d85f69b0SMike Kravetz 		if (gbl_chg < 0) {
2896feba16e2SMike Kravetz 			vma_end_reservation(h, vma, addr);
289776dcee75SAneesh Kumar K.V 			return ERR_PTR(-ENOSPC);
28985e911373SMike Kravetz 		}
289990d8b7e6SAdam Litke 
2900d85f69b0SMike Kravetz 		/*
2901d85f69b0SMike Kravetz 		 * Even though there was no reservation in the region/reserve
2902d85f69b0SMike Kravetz 		 * map, there could be reservations associated with the
2903d85f69b0SMike Kravetz 		 * subpool that can be used.  This would be indicated if the
2904d85f69b0SMike Kravetz 		 * return value of hugepage_subpool_get_pages() is zero.
2905d85f69b0SMike Kravetz 		 * However, if avoid_reserve is specified we still avoid even
2906d85f69b0SMike Kravetz 		 * the subpool reservations.
2907d85f69b0SMike Kravetz 		 */
2908d85f69b0SMike Kravetz 		if (avoid_reserve)
2909d85f69b0SMike Kravetz 			gbl_chg = 1;
2910d85f69b0SMike Kravetz 	}
2911d85f69b0SMike Kravetz 
291208cf9fafSMina Almasry 	/* If this allocation is not consuming a reservation, charge it now.
291308cf9fafSMina Almasry 	 */
29146501fe5fSMiaohe Lin 	deferred_reserve = map_chg || avoid_reserve;
291508cf9fafSMina Almasry 	if (deferred_reserve) {
291608cf9fafSMina Almasry 		ret = hugetlb_cgroup_charge_cgroup_rsvd(
291708cf9fafSMina Almasry 			idx, pages_per_huge_page(h), &h_cg);
29188f34af6fSJianyu Zhan 		if (ret)
29198f34af6fSJianyu Zhan 			goto out_subpool_put;
292008cf9fafSMina Almasry 	}
292108cf9fafSMina Almasry 
292208cf9fafSMina Almasry 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
292308cf9fafSMina Almasry 	if (ret)
292408cf9fafSMina Almasry 		goto out_uncharge_cgroup_reservation;
29258f34af6fSJianyu Zhan 
2926db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
2927d85f69b0SMike Kravetz 	/*
2928d85f69b0SMike Kravetz 	 * gbl_chg is passed to indicate whether or not a page must be taken
2929d85f69b0SMike Kravetz 	 * from the global free pool (global change).  gbl_chg == 0 indicates
2930d85f69b0SMike Kravetz 	 * a reservation exists for the allocation.
2931d85f69b0SMike Kravetz 	 */
2932d85f69b0SMike Kravetz 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
293381a6fcaeSJoonsoo Kim 	if (!page) {
2934db71ef79SMike Kravetz 		spin_unlock_irq(&hugetlb_lock);
29350c397daeSMichal Hocko 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
29368f34af6fSJianyu Zhan 		if (!page)
29378f34af6fSJianyu Zhan 			goto out_uncharge_cgroup;
2938a88c7695SNaoya Horiguchi 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2939d6995da3SMike Kravetz 			SetHPageRestoreReserve(page);
2940a88c7695SNaoya Horiguchi 			h->resv_huge_pages--;
2941a88c7695SNaoya Horiguchi 		}
2942db71ef79SMike Kravetz 		spin_lock_irq(&hugetlb_lock);
294315a8d68eSWei Yang 		list_add(&page->lru, &h->hugepage_activelist);
294481a6fcaeSJoonsoo Kim 		/* Fall through */
2945a1e78772SMel Gorman 	}
294681a6fcaeSJoonsoo Kim 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
294708cf9fafSMina Almasry 	/* If allocation is not consuming a reservation, also store the
294808cf9fafSMina Almasry 	 * hugetlb_cgroup pointer on the page.
294908cf9fafSMina Almasry 	 */
295008cf9fafSMina Almasry 	if (deferred_reserve) {
295108cf9fafSMina Almasry 		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
295208cf9fafSMina Almasry 						  h_cg, page);
295308cf9fafSMina Almasry 	}
295408cf9fafSMina Almasry 
2955db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
2956a1e78772SMel Gorman 
2957d6995da3SMike Kravetz 	hugetlb_set_page_subpool(page, spool);
2958a1e78772SMel Gorman 
2959d85f69b0SMike Kravetz 	map_commit = vma_commit_reservation(h, vma, addr);
2960d85f69b0SMike Kravetz 	if (unlikely(map_chg > map_commit)) {
296133039678SMike Kravetz 		/*
296233039678SMike Kravetz 		 * The page was added to the reservation map between
296333039678SMike Kravetz 		 * vma_needs_reservation and vma_commit_reservation.
296433039678SMike Kravetz 		 * This indicates a race with hugetlb_reserve_pages.
296533039678SMike Kravetz 		 * Adjust for the subpool count incremented above AND
296633039678SMike Kravetz 		 * in hugetlb_reserve_pages for the same page.  Also,
296733039678SMike Kravetz 		 * the reservation count added in hugetlb_reserve_pages
296833039678SMike Kravetz 		 * no longer applies.
296933039678SMike Kravetz 		 */
297033039678SMike Kravetz 		long rsv_adjust;
297133039678SMike Kravetz 
297233039678SMike Kravetz 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
297333039678SMike Kravetz 		hugetlb_acct_memory(h, -rsv_adjust);
297479aa925bSMike Kravetz 		if (deferred_reserve)
297579aa925bSMike Kravetz 			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
297679aa925bSMike Kravetz 					pages_per_huge_page(h), page);
297733039678SMike Kravetz 	}
29787893d1d5SAdam Litke 	return page;
29798f34af6fSJianyu Zhan 
29808f34af6fSJianyu Zhan out_uncharge_cgroup:
29818f34af6fSJianyu Zhan 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
298208cf9fafSMina Almasry out_uncharge_cgroup_reservation:
298308cf9fafSMina Almasry 	if (deferred_reserve)
298408cf9fafSMina Almasry 		hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
298508cf9fafSMina Almasry 						    h_cg);
29868f34af6fSJianyu Zhan out_subpool_put:
2987d85f69b0SMike Kravetz 	if (map_chg || avoid_reserve)
29888f34af6fSJianyu Zhan 		hugepage_subpool_put_pages(spool, 1);
2989feba16e2SMike Kravetz 	vma_end_reservation(h, vma, addr);
29908f34af6fSJianyu Zhan 	return ERR_PTR(-ENOSPC);
2991b45b5bd6SDavid Gibson }
2992b45b5bd6SDavid Gibson 
2993b5389086SZhenguo Yao int alloc_bootmem_huge_page(struct hstate *h, int nid)
2994e24a1307SAneesh Kumar K.V 	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2995b5389086SZhenguo Yao int __alloc_bootmem_huge_page(struct hstate *h, int nid)
2996aa888a74SAndi Kleen {
2997b5389086SZhenguo Yao 	struct huge_bootmem_page *m = NULL; /* initialize for clang */
2998b2261026SJoonsoo Kim 	int nr_nodes, node;
2999aa888a74SAndi Kleen 
3000b5389086SZhenguo Yao 	/* do node specific alloc */
3001b5389086SZhenguo Yao 	if (nid != NUMA_NO_NODE) {
3002b5389086SZhenguo Yao 		m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
3003b5389086SZhenguo Yao 				0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3004b5389086SZhenguo Yao 		if (!m)
3005b5389086SZhenguo Yao 			return 0;
3006b5389086SZhenguo Yao 		goto found;
3007b5389086SZhenguo Yao 	}
3008b5389086SZhenguo Yao 	/* allocate from next node when distributing huge pages */
3009b2261026SJoonsoo Kim 	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
3010b5389086SZhenguo Yao 		m = memblock_alloc_try_nid_raw(
30118b89a116SGrygorii Strashko 				huge_page_size(h), huge_page_size(h),
301297ad1087SMike Rapoport 				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
3013aa888a74SAndi Kleen 		/*
3014aa888a74SAndi Kleen 		 * Use the beginning of the huge page to store the
3015aa888a74SAndi Kleen 		 * huge_bootmem_page struct (until gather_bootmem
3016aa888a74SAndi Kleen 		 * puts them into the mem_map).
3017aa888a74SAndi Kleen 		 */
3018b5389086SZhenguo Yao 		if (!m)
3019b5389086SZhenguo Yao 			return 0;
3020aa888a74SAndi Kleen 		goto found;
3021aa888a74SAndi Kleen 	}
3022aa888a74SAndi Kleen 
3023aa888a74SAndi Kleen found:
3024aa888a74SAndi Kleen 	/* Put them into a private list first because mem_map is not up yet */
3025330d6e48SCannon Matthews 	INIT_LIST_HEAD(&m->list);
3026aa888a74SAndi Kleen 	list_add(&m->list, &huge_boot_pages);
3027aa888a74SAndi Kleen 	m->hstate = h;
3028aa888a74SAndi Kleen 	return 1;
3029aa888a74SAndi Kleen }
3030aa888a74SAndi Kleen 
303148b8d744SMike Kravetz /*
303248b8d744SMike Kravetz  * Put bootmem huge pages into the standard lists after mem_map is up.
303348b8d744SMike Kravetz  * Note: This only applies to gigantic (order > MAX_ORDER) pages.
303448b8d744SMike Kravetz  */
3035aa888a74SAndi Kleen static void __init gather_bootmem_prealloc(void)
3036aa888a74SAndi Kleen {
3037aa888a74SAndi Kleen 	struct huge_bootmem_page *m;
3038aa888a74SAndi Kleen 
3039aa888a74SAndi Kleen 	list_for_each_entry(m, &huge_boot_pages, list) {
304040d18ebfSMike Kravetz 		struct page *page = virt_to_page(m);
3041aa888a74SAndi Kleen 		struct hstate *h = m->hstate;
3042ee8f248dSBecky Bruce 
304348b8d744SMike Kravetz 		VM_BUG_ON(!hstate_is_gigantic(h));
3044aa888a74SAndi Kleen 		WARN_ON(page_count(page) != 1);
30457118fc29SMike Kravetz 		if (prep_compound_gigantic_page(page, huge_page_order(h))) {
3046ef5a22beSAndrea Arcangeli 			WARN_ON(PageReserved(page));
3047aa888a74SAndi Kleen 			prep_new_huge_page(h, page, page_to_nid(page));
30487118fc29SMike Kravetz 			put_page(page); /* add to the hugepage allocator */
30497118fc29SMike Kravetz 		} else {
3050416d85edSMike Kravetz 			/* VERY unlikely inflated ref count on a tail page */
30517118fc29SMike Kravetz 			free_gigantic_page(page, huge_page_order(h));
30527118fc29SMike Kravetz 		}
3053af0fb9dfSMichal Hocko 
3054b0320c7bSRafael Aquini 		/*
305548b8d744SMike Kravetz 		 * We need to restore the 'stolen' pages to totalram_pages
305648b8d744SMike Kravetz 		 * in order to fix confusing memory reports from free(1) and
305748b8d744SMike Kravetz 		 * other side-effects, like CommitLimit going negative.
3058b0320c7bSRafael Aquini 		 */
3059c78a7f36SMiaohe Lin 		adjust_managed_page_count(page, pages_per_huge_page(h));
3060520495feSCannon Matthews 		cond_resched();
3061aa888a74SAndi Kleen 	}
3062aa888a74SAndi Kleen }
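
/*
 * Allocate the boot-time huge pages that were requested for a specific
 * node.  If the request cannot be fully satisfied, warn and trim
 * max_huge_pages and max_huge_pages_node[nid] to what was allocated.
 */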
3063b5389086SZhenguo Yao static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3064b5389086SZhenguo Yao {
3065b5389086SZhenguo Yao 	unsigned long i;
3066b5389086SZhenguo Yao 	char buf[32];
3067b5389086SZhenguo Yao 
3068b5389086SZhenguo Yao 	for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3069b5389086SZhenguo Yao 		if (hstate_is_gigantic(h)) {
3070b5389086SZhenguo Yao 			if (!alloc_bootmem_huge_page(h, nid))
3071b5389086SZhenguo Yao 				break;
3072b5389086SZhenguo Yao 		} else {
3073b5389086SZhenguo Yao 			struct page *page;
3074b5389086SZhenguo Yao 			gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3075b5389086SZhenguo Yao 
3076b5389086SZhenguo Yao 			page = alloc_fresh_huge_page(h, gfp_mask, nid,
3077b5389086SZhenguo Yao 					&node_states[N_MEMORY], NULL);
3078b5389086SZhenguo Yao 			if (!page)
3079b5389086SZhenguo Yao 				break;
3080b5389086SZhenguo Yao 			put_page(page); /* free it into the hugepage allocator */
3081b5389086SZhenguo Yao 		}
3082b5389086SZhenguo Yao 		cond_resched();
3083b5389086SZhenguo Yao 	}
3084b5389086SZhenguo Yao 	if (i == h->max_huge_pages_node[nid])
3085b5389086SZhenguo Yao 		return;
3086b5389086SZhenguo Yao 
3087b5389086SZhenguo Yao 	string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3088b5389086SZhenguo Yao 	pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
3089b5389086SZhenguo Yao 		h->max_huge_pages_node[nid], buf, nid, i);
3090b5389086SZhenguo Yao 	h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3091b5389086SZhenguo Yao 	h->max_huge_pages_node[nid] = i;
3092b5389086SZhenguo Yao }
3093aa888a74SAndi Kleen 
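/*
 * Allocate the boot-time huge page pool for hstate @h: per node when
 * node-specific counts were requested, otherwise balanced across all
 * online nodes with memory.  Gigantic pages come from bootmem; other
 * sizes come from the buddy allocator.
 */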
30948faa8b07SAndi Kleen static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
30951da177e4SLinus Torvalds {
30961da177e4SLinus Torvalds 	unsigned long i;
3097f60858f9SMike Kravetz 	nodemask_t *node_alloc_noretry;
3098b5389086SZhenguo Yao 	bool node_specific_alloc = false;
3099f60858f9SMike Kravetz 
3100b5389086SZhenguo Yao 	/* skip gigantic hugepages allocation if hugetlb_cma enabled */
3101b5389086SZhenguo Yao 	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
3102b5389086SZhenguo Yao 		pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3103b5389086SZhenguo Yao 		return;
3104b5389086SZhenguo Yao 	}
3105b5389086SZhenguo Yao 
3106b5389086SZhenguo Yao 	/* do node specific alloc */
31070a7a0f6fSPeng Liu 	for_each_online_node(i) {
3108b5389086SZhenguo Yao 		if (h->max_huge_pages_node[i] > 0) {
3109b5389086SZhenguo Yao 			hugetlb_hstate_alloc_pages_onenode(h, i);
3110b5389086SZhenguo Yao 			node_specific_alloc = true;
3111b5389086SZhenguo Yao 		}
3112b5389086SZhenguo Yao 	}
3113b5389086SZhenguo Yao 
3114b5389086SZhenguo Yao 	if (node_specific_alloc)
3115b5389086SZhenguo Yao 		return;
3116b5389086SZhenguo Yao 
3117b5389086SZhenguo Yao 	/* below will do all node balanced alloc */
3118f60858f9SMike Kravetz 	if (!hstate_is_gigantic(h)) {
3119f60858f9SMike Kravetz 		/*
3120f60858f9SMike Kravetz 		 * Bit mask controlling how hard we retry per-node allocations.
3121f60858f9SMike Kravetz 		 * Ignore errors as lower level routines can deal with
3122f60858f9SMike Kravetz 		 * node_alloc_noretry == NULL.  If this kmalloc fails at boot
3123f60858f9SMike Kravetz 		 * time, we are likely in bigger trouble.
3124f60858f9SMike Kravetz 		 */
3125f60858f9SMike Kravetz 		node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
3126f60858f9SMike Kravetz 						GFP_KERNEL);
3127f60858f9SMike Kravetz 	} else {
3128f60858f9SMike Kravetz 		/* allocations done at boot time */
3129f60858f9SMike Kravetz 		node_alloc_noretry = NULL;
3130f60858f9SMike Kravetz 	}
3131f60858f9SMike Kravetz 
3132f60858f9SMike Kravetz 	/* bit mask controlling how hard we retry per-node allocations */
3133f60858f9SMike Kravetz 	if (node_alloc_noretry)
3134f60858f9SMike Kravetz 		nodes_clear(*node_alloc_noretry);
31351da177e4SLinus Torvalds 
3136e5ff2159SAndi Kleen 	for (i = 0; i < h->max_huge_pages; ++i) {
3137bae7f4aeSLuiz Capitulino 		if (hstate_is_gigantic(h)) {
3138b5389086SZhenguo Yao 			if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3139aa888a74SAndi Kleen 				break;
31400c397daeSMichal Hocko 		} else if (!alloc_pool_huge_page(h,
3141f60858f9SMike Kravetz 					 &node_states[N_MEMORY],
3142f60858f9SMike Kravetz 					 node_alloc_noretry))
31431da177e4SLinus Torvalds 			break;
314469ed779aSDavid Rientjes 		cond_resched();
31451da177e4SLinus Torvalds 	}
3146d715cf80SLiam R. Howlett 	if (i < h->max_huge_pages) {
3147d715cf80SLiam R. Howlett 		char buf[32];
3148d715cf80SLiam R. Howlett 
3149c6247f72SMatthew Wilcox 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3150d715cf80SLiam R. Howlett 		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
3151d715cf80SLiam R. Howlett 			h->max_huge_pages, buf, i);
31528faa8b07SAndi Kleen 		h->max_huge_pages = i;
3153e5ff2159SAndi Kleen 	}
3154f60858f9SMike Kravetz 	kfree(node_alloc_noretry);
3155d715cf80SLiam R. Howlett }
3156e5ff2159SAndi Kleen 
3157e5ff2159SAndi Kleen static void __init hugetlb_init_hstates(void)
3158e5ff2159SAndi Kleen {
315979dfc695SMike Kravetz 	struct hstate *h, *h2;
3160e5ff2159SAndi Kleen 
3161e5ff2159SAndi Kleen 	for_each_hstate(h) {
31628faa8b07SAndi Kleen 		/* oversize hugepages were init'ed in early boot */
3163bae7f4aeSLuiz Capitulino 		if (!hstate_is_gigantic(h))
31648faa8b07SAndi Kleen 			hugetlb_hstate_alloc_pages(h);
316579dfc695SMike Kravetz 
316679dfc695SMike Kravetz 		/*
316779dfc695SMike Kravetz 		 * Set demote order for each hstate.  Note that
316879dfc695SMike Kravetz 		 * h->demote_order is initially 0.
316979dfc695SMike Kravetz 		 * - We cannot demote gigantic pages if runtime freeing
317079dfc695SMike Kravetz 		 *   is not supported, so skip this.
3171a01f4390SMike Kravetz 		 * - If CMA allocation is possible, we cannot demote
3172a01f4390SMike Kravetz 		 *   pages of HUGETLB_PAGE_ORDER or smaller.
317379dfc695SMike Kravetz 		 */
317479dfc695SMike Kravetz 		if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
317579dfc695SMike Kravetz 			continue;
3176a01f4390SMike Kravetz 		if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
3177a01f4390SMike Kravetz 			continue;
317879dfc695SMike Kravetz 		for_each_hstate(h2) {
317979dfc695SMike Kravetz 			if (h2 == h)
318079dfc695SMike Kravetz 				continue;
318179dfc695SMike Kravetz 			if (h2->order < h->order &&
318279dfc695SMike Kravetz 			    h2->order > h->demote_order)
318379dfc695SMike Kravetz 				h->demote_order = h2->order;
318479dfc695SMike Kravetz 		}
3185e5ff2159SAndi Kleen 	}
3186e5ff2159SAndi Kleen }
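/*
 * Worked example (illustrative, assuming x86_64 page sizes): with a 2 MB
 * (order 9) and a 1 GB (order 18) hstate registered, the loop above sets
 * demote_order of the 1 GB hstate to 9, the largest smaller order, while
 * the 2 MB hstate keeps demote_order == 0 because no smaller hstate
 * exists.
 */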
3187e5ff2159SAndi Kleen 
3188e5ff2159SAndi Kleen static void __init report_hugepages(void)
3189e5ff2159SAndi Kleen {
3190e5ff2159SAndi Kleen 	struct hstate *h;
3191e5ff2159SAndi Kleen 
3192e5ff2159SAndi Kleen 	for_each_hstate(h) {
31934abd32dbSAndi Kleen 		char buf[32];
3194c6247f72SMatthew Wilcox 
3195c6247f72SMatthew Wilcox 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
31966213834cSMuchun Song 		pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
3197c6247f72SMatthew Wilcox 			buf, h->free_huge_pages);
31986213834cSMuchun Song 		pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
31996213834cSMuchun Song 			hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
3200e5ff2159SAndi Kleen 	}
3201e5ff2159SAndi Kleen }
3202e5ff2159SAndi Kleen 
32031da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM
32046ae11b27SLee Schermerhorn static void try_to_free_low(struct hstate *h, unsigned long count,
32056ae11b27SLee Schermerhorn 						nodemask_t *nodes_allowed)
32061da177e4SLinus Torvalds {
32074415cc8dSChristoph Lameter 	int i;
32081121828aSMike Kravetz 	LIST_HEAD(page_list);
32094415cc8dSChristoph Lameter 
32109487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
3211bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
3212aa888a74SAndi Kleen 		return;
3213aa888a74SAndi Kleen 
32141121828aSMike Kravetz 	/*
32151121828aSMike Kravetz 	 * Collect pages to be freed on a list, and free after dropping lock
32161121828aSMike Kravetz 	 */
32176ae11b27SLee Schermerhorn 	for_each_node_mask(i, *nodes_allowed) {
321810c6ec49SMike Kravetz 		struct page *page, *next;
3219a5516438SAndi Kleen 		struct list_head *freel = &h->hugepage_freelists[i];
3220a5516438SAndi Kleen 		list_for_each_entry_safe(page, next, freel, lru) {
3221a5516438SAndi Kleen 			if (count >= h->nr_huge_pages)
32221121828aSMike Kravetz 				goto out;
32231da177e4SLinus Torvalds 			if (PageHighMem(page))
32241da177e4SLinus Torvalds 				continue;
32256eb4e88aSMike Kravetz 			remove_hugetlb_page(h, page, false);
32261121828aSMike Kravetz 			list_add(&page->lru, &page_list);
32271121828aSMike Kravetz 		}
32281121828aSMike Kravetz 	}
32291121828aSMike Kravetz 
32301121828aSMike Kravetz out:
3231db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
323210c6ec49SMike Kravetz 	update_and_free_pages_bulk(h, &page_list);
3233db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
32341da177e4SLinus Torvalds }
32351da177e4SLinus Torvalds #else
32366ae11b27SLee Schermerhorn static inline void try_to_free_low(struct hstate *h, unsigned long count,
32376ae11b27SLee Schermerhorn 						nodemask_t *nodes_allowed)
32381da177e4SLinus Torvalds {
32391da177e4SLinus Torvalds }
32401da177e4SLinus Torvalds #endif
32411da177e4SLinus Torvalds 
324220a0307cSWu Fengguang /*
324320a0307cSWu Fengguang  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
324420a0307cSWu Fengguang  * balanced by operating on them in a round-robin fashion.
324520a0307cSWu Fengguang  * Returns 1 if an adjustment was made.
324620a0307cSWu Fengguang  */
32476ae11b27SLee Schermerhorn static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
32486ae11b27SLee Schermerhorn 				int delta)
324920a0307cSWu Fengguang {
3250b2261026SJoonsoo Kim 	int nr_nodes, node;
325120a0307cSWu Fengguang 
32529487ca60SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
325320a0307cSWu Fengguang 	VM_BUG_ON(delta != -1 && delta != 1);
325420a0307cSWu Fengguang 
3255e8c5c824SLee Schermerhorn 	if (delta < 0) {
3256b2261026SJoonsoo Kim 		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
3257b2261026SJoonsoo Kim 			if (h->surplus_huge_pages_node[node])
3258b2261026SJoonsoo Kim 				goto found;
3259b2261026SJoonsoo Kim 		}
3260b2261026SJoonsoo Kim 	} else {
3261b2261026SJoonsoo Kim 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3262b2261026SJoonsoo Kim 			if (h->surplus_huge_pages_node[node] <
3263b2261026SJoonsoo Kim 					h->nr_huge_pages_node[node])
3264b2261026SJoonsoo Kim 				goto found;
3265e8c5c824SLee Schermerhorn 		}
32669a76db09SLee Schermerhorn 	}
3267b2261026SJoonsoo Kim 	return 0;
326820a0307cSWu Fengguang 
3269b2261026SJoonsoo Kim found:
327020a0307cSWu Fengguang 	h->surplus_huge_pages += delta;
3271b2261026SJoonsoo Kim 	h->surplus_huge_pages_node[node] += delta;
3272b2261026SJoonsoo Kim 	return 1;
327320a0307cSWu Fengguang }
327420a0307cSWu Fengguang 
3275a5516438SAndi Kleen #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
3276fd875dcaSMike Kravetz static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
32776ae11b27SLee Schermerhorn 			      nodemask_t *nodes_allowed)
32781da177e4SLinus Torvalds {
32797893d1d5SAdam Litke 	unsigned long min_count, ret;
328010c6ec49SMike Kravetz 	struct page *page;
328110c6ec49SMike Kravetz 	LIST_HEAD(page_list);
3282f60858f9SMike Kravetz 	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3283f60858f9SMike Kravetz 
3284f60858f9SMike Kravetz 	/*
3285f60858f9SMike Kravetz 	 * Bit mask controlling how hard we retry per-node allocations.
3286f60858f9SMike Kravetz 	 * If we can not allocate the bit mask, do not attempt to allocate
3287f60858f9SMike Kravetz 	 * the requested huge pages.
3288f60858f9SMike Kravetz 	 */
3289f60858f9SMike Kravetz 	if (node_alloc_noretry)
3290f60858f9SMike Kravetz 		nodes_clear(*node_alloc_noretry);
3291f60858f9SMike Kravetz 	else
3292f60858f9SMike Kravetz 		return -ENOMEM;
32931da177e4SLinus Torvalds 
329429383967SMike Kravetz 	/*
329529383967SMike Kravetz 	 * resize_lock mutex prevents concurrent adjustments to number of
329629383967SMike Kravetz 	 * pages in hstate via the proc/sysfs interfaces.
329729383967SMike Kravetz 	 */
329829383967SMike Kravetz 	mutex_lock(&h->resize_lock);
3299b65d4adbSMuchun Song 	flush_free_hpage_work(h);
3300db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
33014eb0716eSAlexandre Ghiti 
33024eb0716eSAlexandre Ghiti 	/*
3303fd875dcaSMike Kravetz 	 * Check for a node specific request.
3304fd875dcaSMike Kravetz 	 * Changing node specific huge page count may require a corresponding
3305fd875dcaSMike Kravetz 	 * change to the global count.  In any case, the passed node mask
3306fd875dcaSMike Kravetz 	 * (nodes_allowed) will restrict alloc/free to the specified node.
3307fd875dcaSMike Kravetz 	 */
3308fd875dcaSMike Kravetz 	if (nid != NUMA_NO_NODE) {
3309fd875dcaSMike Kravetz 		unsigned long old_count = count;
3310fd875dcaSMike Kravetz 
3311fd875dcaSMike Kravetz 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
3312fd875dcaSMike Kravetz 		/*
3313fd875dcaSMike Kravetz 		 * User may have specified a large count value which caused the
3314fd875dcaSMike Kravetz 		 * above calculation to overflow.  In this case, they wanted
3315fd875dcaSMike Kravetz 		 * to allocate as many huge pages as possible.  Set count to
3316fd875dcaSMike Kravetz 		 * largest possible value to align with their intention.
3317fd875dcaSMike Kravetz 		 */
3318fd875dcaSMike Kravetz 		if (count < old_count)
3319fd875dcaSMike Kravetz 			count = ULONG_MAX;
3320fd875dcaSMike Kravetz 	}
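	/*
	 * Worked example (illustrative, not from the original source): if
	 * node 1 holds 4 huge pages, the global pool holds 10, and the user
	 * writes 2 to node 1's nr_hugepages, then count becomes
	 * 2 + (10 - 4) = 8, i.e. the new global target leaves the other
	 * nodes' 6 pages untouched.
	 */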
3321fd875dcaSMike Kravetz 
3322fd875dcaSMike Kravetz 	/*
33234eb0716eSAlexandre Ghiti 	 * Runtime allocation of gigantic pages depends on the capability for
33244eb0716eSAlexandre Ghiti 	 * large page range allocation.
33254eb0716eSAlexandre Ghiti 	 * If the system does not provide this feature, return an error when
33264eb0716eSAlexandre Ghiti 	 * the user tries to allocate gigantic pages but let the user free the
33274eb0716eSAlexandre Ghiti 	 * boottime allocated gigantic pages.
33284eb0716eSAlexandre Ghiti 	 */
33294eb0716eSAlexandre Ghiti 	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
33304eb0716eSAlexandre Ghiti 		if (count > persistent_huge_pages(h)) {
3331db71ef79SMike Kravetz 			spin_unlock_irq(&hugetlb_lock);
333229383967SMike Kravetz 			mutex_unlock(&h->resize_lock);
3333f60858f9SMike Kravetz 			NODEMASK_FREE(node_alloc_noretry);
33344eb0716eSAlexandre Ghiti 			return -EINVAL;
33354eb0716eSAlexandre Ghiti 		}
33364eb0716eSAlexandre Ghiti 		/* Fall through to decrease pool */
33374eb0716eSAlexandre Ghiti 	}
3338aa888a74SAndi Kleen 
33397893d1d5SAdam Litke 	/*
33407893d1d5SAdam Litke 	 * Increase the pool size
33417893d1d5SAdam Litke 	 * First take pages out of surplus state.  Then make up the
33427893d1d5SAdam Litke 	 * remaining difference by allocating fresh huge pages.
3343d1c3fb1fSNishanth Aravamudan 	 *
33440c397daeSMichal Hocko 	 * We might race with alloc_surplus_huge_page() here and be unable
3345d1c3fb1fSNishanth Aravamudan 	 * to convert a surplus huge page to a normal huge page. That is
3346d1c3fb1fSNishanth Aravamudan 	 * not critical, though; it just means the overall size of the
3347d1c3fb1fSNishanth Aravamudan 	 * pool might be one hugepage larger than it needs to be, but
3348d1c3fb1fSNishanth Aravamudan 	 * within all the constraints specified by the sysctls.
33497893d1d5SAdam Litke 	 */
3350a5516438SAndi Kleen 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
33516ae11b27SLee Schermerhorn 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
33527893d1d5SAdam Litke 			break;
33537893d1d5SAdam Litke 	}
33547893d1d5SAdam Litke 
3355a5516438SAndi Kleen 	while (count > persistent_huge_pages(h)) {
33567893d1d5SAdam Litke 		/*
33577893d1d5SAdam Litke 		 * If this allocation races such that we no longer need the
33587893d1d5SAdam Litke 		 * page, free_huge_page will handle it by freeing the page
33597893d1d5SAdam Litke 		 * and reducing the surplus.
33607893d1d5SAdam Litke 		 */
3361db71ef79SMike Kravetz 		spin_unlock_irq(&hugetlb_lock);
3362649920c6SJia He 
3363649920c6SJia He 		/* yield cpu to avoid soft lockup */
3364649920c6SJia He 		cond_resched();
3365649920c6SJia He 
3366f60858f9SMike Kravetz 		ret = alloc_pool_huge_page(h, nodes_allowed,
3367f60858f9SMike Kravetz 						node_alloc_noretry);
3368db71ef79SMike Kravetz 		spin_lock_irq(&hugetlb_lock);
33697893d1d5SAdam Litke 		if (!ret)
33707893d1d5SAdam Litke 			goto out;
33717893d1d5SAdam Litke 
3372536240f2SMel Gorman 		/* Bail for signals. Probably ctrl-c from user */
3373536240f2SMel Gorman 		if (signal_pending(current))
3374536240f2SMel Gorman 			goto out;
33757893d1d5SAdam Litke 	}
33767893d1d5SAdam Litke 
33777893d1d5SAdam Litke 	/*
33787893d1d5SAdam Litke 	 * Decrease the pool size
33797893d1d5SAdam Litke 	 * First return free pages to the buddy allocator (being careful
33807893d1d5SAdam Litke 	 * to keep enough around to satisfy reservations).  Then place
33817893d1d5SAdam Litke 	 * pages into surplus state as needed so the pool will shrink
33827893d1d5SAdam Litke 	 * to the desired size as pages become free.
3383d1c3fb1fSNishanth Aravamudan 	 *
3384d1c3fb1fSNishanth Aravamudan 	 * By placing pages into the surplus state independent of the
3385d1c3fb1fSNishanth Aravamudan 	 * overcommit value, we are allowing the surplus pool size to
3386d1c3fb1fSNishanth Aravamudan 	 * exceed overcommit. There are few sane options here. Since
33870c397daeSMichal Hocko 	 * alloc_surplus_huge_page() is checking the global counter,
3388d1c3fb1fSNishanth Aravamudan 	 * though, we'll note that we're not allowed to exceed surplus
3389d1c3fb1fSNishanth Aravamudan 	 * and won't grow the pool anywhere else. Not until one of the
3390d1c3fb1fSNishanth Aravamudan 	 * sysctls are changed, or the surplus pages go out of use.
33917893d1d5SAdam Litke 	 */
3392a5516438SAndi Kleen 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
33936b0c880dSAdam Litke 	min_count = max(count, min_count);
33946ae11b27SLee Schermerhorn 	try_to_free_low(h, min_count, nodes_allowed);
339510c6ec49SMike Kravetz 
339610c6ec49SMike Kravetz 	/*
339710c6ec49SMike Kravetz 	 * Collect pages to be removed on list without dropping lock
339810c6ec49SMike Kravetz 	 */
3399a5516438SAndi Kleen 	while (min_count < persistent_huge_pages(h)) {
340010c6ec49SMike Kravetz 		page = remove_pool_huge_page(h, nodes_allowed, 0);
340110c6ec49SMike Kravetz 		if (!page)
34021da177e4SLinus Torvalds 			break;
340310c6ec49SMike Kravetz 
340410c6ec49SMike Kravetz 		list_add(&page->lru, &page_list);
34051da177e4SLinus Torvalds 	}
340610c6ec49SMike Kravetz 	/* free the pages after dropping lock */
3407db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
340810c6ec49SMike Kravetz 	update_and_free_pages_bulk(h, &page_list);
3409b65d4adbSMuchun Song 	flush_free_hpage_work(h);
3410db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
341110c6ec49SMike Kravetz 
3412a5516438SAndi Kleen 	while (count < persistent_huge_pages(h)) {
34136ae11b27SLee Schermerhorn 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
34147893d1d5SAdam Litke 			break;
34157893d1d5SAdam Litke 	}
34167893d1d5SAdam Litke out:
34174eb0716eSAlexandre Ghiti 	h->max_huge_pages = persistent_huge_pages(h);
3418db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
341929383967SMike Kravetz 	mutex_unlock(&h->resize_lock);
34204eb0716eSAlexandre Ghiti 
3421f60858f9SMike Kravetz 	NODEMASK_FREE(node_alloc_noretry);
3422f60858f9SMike Kravetz 
34234eb0716eSAlexandre Ghiti 	return 0;
34241da177e4SLinus Torvalds }
34251da177e4SLinus Torvalds 
34268531fc6fSMike Kravetz static int demote_free_huge_page(struct hstate *h, struct page *page)
34278531fc6fSMike Kravetz {
34288531fc6fSMike Kravetz 	int i, nid = page_to_nid(page);
34298531fc6fSMike Kravetz 	struct hstate *target_hstate;
343031731452SDoug Berger 	struct page *subpage;
34318531fc6fSMike Kravetz 	int rc = 0;
34328531fc6fSMike Kravetz 
34338531fc6fSMike Kravetz 	target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
34348531fc6fSMike Kravetz 
34358531fc6fSMike Kravetz 	remove_hugetlb_page_for_demote(h, page, false);
34368531fc6fSMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
34378531fc6fSMike Kravetz 
34386213834cSMuchun Song 	rc = hugetlb_vmemmap_restore(h, page);
34398531fc6fSMike Kravetz 	if (rc) {
34408531fc6fSMike Kravetz 		/* Allocation of vmemmap failed, we cannot demote the page */
34418531fc6fSMike Kravetz 		spin_lock_irq(&hugetlb_lock);
34428531fc6fSMike Kravetz 		set_page_refcounted(page);
34438531fc6fSMike Kravetz 		add_hugetlb_page(h, page, false);
34448531fc6fSMike Kravetz 		return rc;
34458531fc6fSMike Kravetz 	}
34468531fc6fSMike Kravetz 
34478531fc6fSMike Kravetz 	/*
34488531fc6fSMike Kravetz 	 * Use destroy_compound_hugetlb_page_for_demote for all huge page
34498531fc6fSMike Kravetz 	 * sizes as it will not ref count pages.
34508531fc6fSMike Kravetz 	 */
34518531fc6fSMike Kravetz 	destroy_compound_hugetlb_page_for_demote(page, huge_page_order(h));
34528531fc6fSMike Kravetz 
34538531fc6fSMike Kravetz 	/*
34548531fc6fSMike Kravetz 	 * Taking target hstate mutex synchronizes with set_max_huge_pages.
34558531fc6fSMike Kravetz 	 * Without the mutex, pages added to target hstate could be marked
34568531fc6fSMike Kravetz 	 * as surplus.
34578531fc6fSMike Kravetz 	 *
34588531fc6fSMike Kravetz 	 * Note that we already hold h->resize_lock.  To prevent deadlock,
34598531fc6fSMike Kravetz 	 * use the convention of always taking larger size hstate mutex first.
34608531fc6fSMike Kravetz 	 */
34618531fc6fSMike Kravetz 	mutex_lock(&target_hstate->resize_lock);
34628531fc6fSMike Kravetz 	for (i = 0; i < pages_per_huge_page(h);
34638531fc6fSMike Kravetz 				i += pages_per_huge_page(target_hstate)) {
346431731452SDoug Berger 		subpage = nth_page(page, i);
34658531fc6fSMike Kravetz 		if (hstate_is_gigantic(target_hstate))
346631731452SDoug Berger 			prep_compound_gigantic_page_for_demote(subpage,
34678531fc6fSMike Kravetz 							target_hstate->order);
34688531fc6fSMike Kravetz 		else
346931731452SDoug Berger 			prep_compound_page(subpage, target_hstate->order);
347031731452SDoug Berger 		set_page_private(subpage, 0);
347131731452SDoug Berger 		set_page_refcounted(subpage);
347231731452SDoug Berger 		prep_new_huge_page(target_hstate, subpage, nid);
347331731452SDoug Berger 		put_page(subpage);
34748531fc6fSMike Kravetz 	}
34758531fc6fSMike Kravetz 	mutex_unlock(&target_hstate->resize_lock);
34768531fc6fSMike Kravetz 
34778531fc6fSMike Kravetz 	spin_lock_irq(&hugetlb_lock);
34788531fc6fSMike Kravetz 
34798531fc6fSMike Kravetz 	/*
34808531fc6fSMike Kravetz 	 * Not absolutely necessary, but for consistency update max_huge_pages
34818531fc6fSMike Kravetz 	 * based on pool changes for the demoted page.
34828531fc6fSMike Kravetz 	 */
34838531fc6fSMike Kravetz 	h->max_huge_pages--;
3484a43a83c7SMiaohe Lin 	target_hstate->max_huge_pages +=
3485a43a83c7SMiaohe Lin 		pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
34868531fc6fSMike Kravetz 
34878531fc6fSMike Kravetz 	return rc;
34888531fc6fSMike Kravetz }
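/*
 * Illustrative arithmetic (assuming x86_64 page sizes): demoting one
 * free 1 GB page into 2 MB pages runs the loop above 512 times, since
 * pages_per_huge_page(h) == 262144 and pages_per_huge_page(target) == 512,
 * and max_huge_pages of the target hstate grows by 262144 / 512 = 512.
 */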
34898531fc6fSMike Kravetz 
349079dfc695SMike Kravetz static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
349179dfc695SMike Kravetz 	__must_hold(&hugetlb_lock)
349279dfc695SMike Kravetz {
34938531fc6fSMike Kravetz 	int nr_nodes, node;
34948531fc6fSMike Kravetz 	struct page *page;
349579dfc695SMike Kravetz 
349679dfc695SMike Kravetz 	lockdep_assert_held(&hugetlb_lock);
349779dfc695SMike Kravetz 
349879dfc695SMike Kravetz 	/* We should never get here if no demote order */
349979dfc695SMike Kravetz 	if (!h->demote_order) {
350079dfc695SMike Kravetz 		pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
350179dfc695SMike Kravetz 		return -EINVAL;		/* internal error */
350279dfc695SMike Kravetz 	}
350379dfc695SMike Kravetz 
35048531fc6fSMike Kravetz 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
35055a317412SMike Kravetz 		list_for_each_entry(page, &h->hugepage_freelists[node], lru) {
35065a317412SMike Kravetz 			if (PageHWPoison(page))
35075a317412SMike Kravetz 				continue;
35085a317412SMike Kravetz 
35095a317412SMike Kravetz 			return demote_free_huge_page(h, page);
35108531fc6fSMike Kravetz 		}
35118531fc6fSMike Kravetz 	}
35128531fc6fSMike Kravetz 
35135a317412SMike Kravetz 	/*
35145a317412SMike Kravetz 	 * The only way to get here is if all pages on the free lists are poisoned.
35155a317412SMike Kravetz 	 * Return -EBUSY so that caller will not retry.
35165a317412SMike Kravetz 	 */
35175a317412SMike Kravetz 	return -EBUSY;
351879dfc695SMike Kravetz }
351979dfc695SMike Kravetz 
3520a3437870SNishanth Aravamudan #define HSTATE_ATTR_RO(_name) \
3521a3437870SNishanth Aravamudan 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3522a3437870SNishanth Aravamudan 
352379dfc695SMike Kravetz #define HSTATE_ATTR_WO(_name) \
352479dfc695SMike Kravetz 	static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
352579dfc695SMike Kravetz 
3526a3437870SNishanth Aravamudan #define HSTATE_ATTR(_name) \
352798bc26acSMiaohe Lin 	static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
3528a3437870SNishanth Aravamudan 
3529a3437870SNishanth Aravamudan static struct kobject *hugepages_kobj;
3530a3437870SNishanth Aravamudan static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
3531a3437870SNishanth Aravamudan 
35329a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
35339a305230SLee Schermerhorn 
35349a305230SLee Schermerhorn static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
3535a3437870SNishanth Aravamudan {
3536a3437870SNishanth Aravamudan 	int i;
35379a305230SLee Schermerhorn 
3538a3437870SNishanth Aravamudan 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
35399a305230SLee Schermerhorn 		if (hstate_kobjs[i] == kobj) {
35409a305230SLee Schermerhorn 			if (nidp)
35419a305230SLee Schermerhorn 				*nidp = NUMA_NO_NODE;
3542a3437870SNishanth Aravamudan 			return &hstates[i];
35439a305230SLee Schermerhorn 		}
35449a305230SLee Schermerhorn 
35459a305230SLee Schermerhorn 	return kobj_to_node_hstate(kobj, nidp);
3546a3437870SNishanth Aravamudan }
3547a3437870SNishanth Aravamudan 
354806808b08SLee Schermerhorn static ssize_t nr_hugepages_show_common(struct kobject *kobj,
3549a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
3550a3437870SNishanth Aravamudan {
35519a305230SLee Schermerhorn 	struct hstate *h;
35529a305230SLee Schermerhorn 	unsigned long nr_huge_pages;
35539a305230SLee Schermerhorn 	int nid;
35549a305230SLee Schermerhorn 
35559a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
35569a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
35579a305230SLee Schermerhorn 		nr_huge_pages = h->nr_huge_pages;
35589a305230SLee Schermerhorn 	else
35599a305230SLee Schermerhorn 		nr_huge_pages = h->nr_huge_pages_node[nid];
35609a305230SLee Schermerhorn 
3561ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", nr_huge_pages);
3562a3437870SNishanth Aravamudan }
3563adbe8726SEric B Munson 
3564238d3c13SDavid Rientjes static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
3565238d3c13SDavid Rientjes 					   struct hstate *h, int nid,
3566238d3c13SDavid Rientjes 					   unsigned long count, size_t len)
3567a3437870SNishanth Aravamudan {
3568a3437870SNishanth Aravamudan 	int err;
35692d0adf7eSOscar Salvador 	nodemask_t nodes_allowed, *n_mask;
3570a3437870SNishanth Aravamudan 
35712d0adf7eSOscar Salvador 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
35722d0adf7eSOscar Salvador 		return -EINVAL;
3573adbe8726SEric B Munson 
35749a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE) {
35759a305230SLee Schermerhorn 		/*
35769a305230SLee Schermerhorn 		 * global hstate attribute
35779a305230SLee Schermerhorn 		 */
35789a305230SLee Schermerhorn 		if (!(obey_mempolicy &&
35792d0adf7eSOscar Salvador 				init_nodemask_of_mempolicy(&nodes_allowed)))
35802d0adf7eSOscar Salvador 			n_mask = &node_states[N_MEMORY];
35812d0adf7eSOscar Salvador 		else
35822d0adf7eSOscar Salvador 			n_mask = &nodes_allowed;
35832d0adf7eSOscar Salvador 	} else {
35849a305230SLee Schermerhorn 		/*
3585fd875dcaSMike Kravetz 		 * Node specific request.  count adjustment happens in
3586fd875dcaSMike Kravetz 		 * set_max_huge_pages() after acquiring hugetlb_lock.
35879a305230SLee Schermerhorn 		 */
35882d0adf7eSOscar Salvador 		init_nodemask_of_node(&nodes_allowed, nid);
35892d0adf7eSOscar Salvador 		n_mask = &nodes_allowed;
3590fd875dcaSMike Kravetz 	}
35919a305230SLee Schermerhorn 
35922d0adf7eSOscar Salvador 	err = set_max_huge_pages(h, count, nid, n_mask);
359306808b08SLee Schermerhorn 
35944eb0716eSAlexandre Ghiti 	return err ? err : len;
359506808b08SLee Schermerhorn }
359606808b08SLee Schermerhorn 
3597238d3c13SDavid Rientjes static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
3598238d3c13SDavid Rientjes 					 struct kobject *kobj, const char *buf,
3599238d3c13SDavid Rientjes 					 size_t len)
3600238d3c13SDavid Rientjes {
3601238d3c13SDavid Rientjes 	struct hstate *h;
3602238d3c13SDavid Rientjes 	unsigned long count;
3603238d3c13SDavid Rientjes 	int nid;
3604238d3c13SDavid Rientjes 	int err;
3605238d3c13SDavid Rientjes 
3606238d3c13SDavid Rientjes 	err = kstrtoul(buf, 10, &count);
3607238d3c13SDavid Rientjes 	if (err)
3608238d3c13SDavid Rientjes 		return err;
3609238d3c13SDavid Rientjes 
3610238d3c13SDavid Rientjes 	h = kobj_to_hstate(kobj, &nid);
3611238d3c13SDavid Rientjes 	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
3612238d3c13SDavid Rientjes }
3613238d3c13SDavid Rientjes 
361406808b08SLee Schermerhorn static ssize_t nr_hugepages_show(struct kobject *kobj,
361506808b08SLee Schermerhorn 				       struct kobj_attribute *attr, char *buf)
361606808b08SLee Schermerhorn {
361706808b08SLee Schermerhorn 	return nr_hugepages_show_common(kobj, attr, buf);
361806808b08SLee Schermerhorn }
361906808b08SLee Schermerhorn 
362006808b08SLee Schermerhorn static ssize_t nr_hugepages_store(struct kobject *kobj,
362106808b08SLee Schermerhorn 	       struct kobj_attribute *attr, const char *buf, size_t len)
362206808b08SLee Schermerhorn {
3623238d3c13SDavid Rientjes 	return nr_hugepages_store_common(false, kobj, buf, len);
3624a3437870SNishanth Aravamudan }
3625a3437870SNishanth Aravamudan HSTATE_ATTR(nr_hugepages);
3626a3437870SNishanth Aravamudan 
362706808b08SLee Schermerhorn #ifdef CONFIG_NUMA
362806808b08SLee Schermerhorn 
362906808b08SLee Schermerhorn /*
363006808b08SLee Schermerhorn  * hstate attribute for an optional mempolicy-based constraint on persistent
363106808b08SLee Schermerhorn  * huge page alloc/free.
363206808b08SLee Schermerhorn  */
363306808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
3634ae7a927dSJoe Perches 					   struct kobj_attribute *attr,
3635ae7a927dSJoe Perches 					   char *buf)
363606808b08SLee Schermerhorn {
363706808b08SLee Schermerhorn 	return nr_hugepages_show_common(kobj, attr, buf);
363806808b08SLee Schermerhorn }
363906808b08SLee Schermerhorn 
364006808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
364106808b08SLee Schermerhorn 	       struct kobj_attribute *attr, const char *buf, size_t len)
364206808b08SLee Schermerhorn {
3643238d3c13SDavid Rientjes 	return nr_hugepages_store_common(true, kobj, buf, len);
364406808b08SLee Schermerhorn }
364506808b08SLee Schermerhorn HSTATE_ATTR(nr_hugepages_mempolicy);
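/*
 * Usage sketch (illustrative; path assumed from the sysfs layout built in
 * hugetlb_sysfs_init() below): performing the write under a task mempolicy
 * constrains which nodes the persistent pages are allocated on or freed
 * from, e.g.
 *
 *	numactl -m 0,1 sh -c \
 *	    'echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'
 */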
364606808b08SLee Schermerhorn #endif
364706808b08SLee Schermerhorn 
364806808b08SLee Schermerhorn 
3649a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
3650a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
3651a3437870SNishanth Aravamudan {
36529a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3653ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
3654a3437870SNishanth Aravamudan }
3655adbe8726SEric B Munson 
3656a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
3657a3437870SNishanth Aravamudan 		struct kobj_attribute *attr, const char *buf, size_t count)
3658a3437870SNishanth Aravamudan {
3659a3437870SNishanth Aravamudan 	int err;
3660a3437870SNishanth Aravamudan 	unsigned long input;
36619a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3662a3437870SNishanth Aravamudan 
3663bae7f4aeSLuiz Capitulino 	if (hstate_is_gigantic(h))
3664adbe8726SEric B Munson 		return -EINVAL;
3665adbe8726SEric B Munson 
36663dbb95f7SJingoo Han 	err = kstrtoul(buf, 10, &input);
3667a3437870SNishanth Aravamudan 	if (err)
366873ae31e5SEric B Munson 		return err;
3669a3437870SNishanth Aravamudan 
3670db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
3671a3437870SNishanth Aravamudan 	h->nr_overcommit_huge_pages = input;
3672db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
3673a3437870SNishanth Aravamudan 
3674a3437870SNishanth Aravamudan 	return count;
3675a3437870SNishanth Aravamudan }
3676a3437870SNishanth Aravamudan HSTATE_ATTR(nr_overcommit_hugepages);
3677a3437870SNishanth Aravamudan 
3678a3437870SNishanth Aravamudan static ssize_t free_hugepages_show(struct kobject *kobj,
3679a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
3680a3437870SNishanth Aravamudan {
36819a305230SLee Schermerhorn 	struct hstate *h;
36829a305230SLee Schermerhorn 	unsigned long free_huge_pages;
36839a305230SLee Schermerhorn 	int nid;
36849a305230SLee Schermerhorn 
36859a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
36869a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
36879a305230SLee Schermerhorn 		free_huge_pages = h->free_huge_pages;
36889a305230SLee Schermerhorn 	else
36899a305230SLee Schermerhorn 		free_huge_pages = h->free_huge_pages_node[nid];
36909a305230SLee Schermerhorn 
3691ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", free_huge_pages);
3692a3437870SNishanth Aravamudan }
3693a3437870SNishanth Aravamudan HSTATE_ATTR_RO(free_hugepages);
3694a3437870SNishanth Aravamudan 
3695a3437870SNishanth Aravamudan static ssize_t resv_hugepages_show(struct kobject *kobj,
3696a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
3697a3437870SNishanth Aravamudan {
36989a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
3699ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
3700a3437870SNishanth Aravamudan }
3701a3437870SNishanth Aravamudan HSTATE_ATTR_RO(resv_hugepages);
3702a3437870SNishanth Aravamudan 
3703a3437870SNishanth Aravamudan static ssize_t surplus_hugepages_show(struct kobject *kobj,
3704a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
3705a3437870SNishanth Aravamudan {
37069a305230SLee Schermerhorn 	struct hstate *h;
37079a305230SLee Schermerhorn 	unsigned long surplus_huge_pages;
37089a305230SLee Schermerhorn 	int nid;
37099a305230SLee Schermerhorn 
37109a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
37119a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
37129a305230SLee Schermerhorn 		surplus_huge_pages = h->surplus_huge_pages;
37139a305230SLee Schermerhorn 	else
37149a305230SLee Schermerhorn 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
37159a305230SLee Schermerhorn 
3716ae7a927dSJoe Perches 	return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
3717a3437870SNishanth Aravamudan }
3718a3437870SNishanth Aravamudan HSTATE_ATTR_RO(surplus_hugepages);
3719a3437870SNishanth Aravamudan 
372079dfc695SMike Kravetz static ssize_t demote_store(struct kobject *kobj,
372179dfc695SMike Kravetz 	       struct kobj_attribute *attr, const char *buf, size_t len)
372279dfc695SMike Kravetz {
372379dfc695SMike Kravetz 	unsigned long nr_demote;
372479dfc695SMike Kravetz 	unsigned long nr_available;
372579dfc695SMike Kravetz 	nodemask_t nodes_allowed, *n_mask;
372679dfc695SMike Kravetz 	struct hstate *h;
37278eeda55fSLi zeming 	int err;
372879dfc695SMike Kravetz 	int nid;
372979dfc695SMike Kravetz 
373079dfc695SMike Kravetz 	err = kstrtoul(buf, 10, &nr_demote);
373179dfc695SMike Kravetz 	if (err)
373279dfc695SMike Kravetz 		return err;
373379dfc695SMike Kravetz 	h = kobj_to_hstate(kobj, &nid);
373479dfc695SMike Kravetz 
373579dfc695SMike Kravetz 	if (nid != NUMA_NO_NODE) {
373679dfc695SMike Kravetz 		init_nodemask_of_node(&nodes_allowed, nid);
373779dfc695SMike Kravetz 		n_mask = &nodes_allowed;
373879dfc695SMike Kravetz 	} else {
373979dfc695SMike Kravetz 		n_mask = &node_states[N_MEMORY];
374079dfc695SMike Kravetz 	}
374179dfc695SMike Kravetz 
374279dfc695SMike Kravetz 	/* Synchronize with other sysfs operations modifying huge pages */
374379dfc695SMike Kravetz 	mutex_lock(&h->resize_lock);
374479dfc695SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
374579dfc695SMike Kravetz 
374679dfc695SMike Kravetz 	while (nr_demote) {
374779dfc695SMike Kravetz 		/*
374879dfc695SMike Kravetz 		 * Check for available pages to demote each time through the
374979dfc695SMike Kravetz 		 * loop as demote_pool_huge_page will drop hugetlb_lock.
375079dfc695SMike Kravetz 		 */
375179dfc695SMike Kravetz 		if (nid != NUMA_NO_NODE)
375279dfc695SMike Kravetz 			nr_available = h->free_huge_pages_node[nid];
375379dfc695SMike Kravetz 		else
375479dfc695SMike Kravetz 			nr_available = h->free_huge_pages;
375579dfc695SMike Kravetz 		nr_available -= h->resv_huge_pages;
375679dfc695SMike Kravetz 		if (!nr_available)
375779dfc695SMike Kravetz 			break;
375879dfc695SMike Kravetz 
375979dfc695SMike Kravetz 		err = demote_pool_huge_page(h, n_mask);
376079dfc695SMike Kravetz 		if (err)
376179dfc695SMike Kravetz 			break;
376279dfc695SMike Kravetz 
376379dfc695SMike Kravetz 		nr_demote--;
376479dfc695SMike Kravetz 	}
376579dfc695SMike Kravetz 
376679dfc695SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
376779dfc695SMike Kravetz 	mutex_unlock(&h->resize_lock);
376879dfc695SMike Kravetz 
376979dfc695SMike Kravetz 	if (err)
377079dfc695SMike Kravetz 		return err;
377179dfc695SMike Kravetz 	return len;
377279dfc695SMike Kravetz }
377379dfc695SMike Kravetz HSTATE_ATTR_WO(demote);
377479dfc695SMike Kravetz 
377579dfc695SMike Kravetz static ssize_t demote_size_show(struct kobject *kobj,
377679dfc695SMike Kravetz 					struct kobj_attribute *attr, char *buf)
377779dfc695SMike Kravetz {
377812658abfSMiaohe Lin 	struct hstate *h = kobj_to_hstate(kobj, NULL);
377979dfc695SMike Kravetz 	unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
378079dfc695SMike Kravetz 
378179dfc695SMike Kravetz 	return sysfs_emit(buf, "%lukB\n", demote_size);
378279dfc695SMike Kravetz }
378379dfc695SMike Kravetz 
378479dfc695SMike Kravetz static ssize_t demote_size_store(struct kobject *kobj,
378579dfc695SMike Kravetz 					struct kobj_attribute *attr,
378679dfc695SMike Kravetz 					const char *buf, size_t count)
378779dfc695SMike Kravetz {
378879dfc695SMike Kravetz 	struct hstate *h, *demote_hstate;
378979dfc695SMike Kravetz 	unsigned long demote_size;
379079dfc695SMike Kravetz 	unsigned int demote_order;
379179dfc695SMike Kravetz 
379279dfc695SMike Kravetz 	demote_size = (unsigned long)memparse(buf, NULL);
379379dfc695SMike Kravetz 
379479dfc695SMike Kravetz 	demote_hstate = size_to_hstate(demote_size);
379579dfc695SMike Kravetz 	if (!demote_hstate)
379679dfc695SMike Kravetz 		return -EINVAL;
379779dfc695SMike Kravetz 	demote_order = demote_hstate->order;
3798a01f4390SMike Kravetz 	if (demote_order < HUGETLB_PAGE_ORDER)
3799a01f4390SMike Kravetz 		return -EINVAL;
380079dfc695SMike Kravetz 
380179dfc695SMike Kravetz 	/* demote order must be smaller than hstate order */
380212658abfSMiaohe Lin 	h = kobj_to_hstate(kobj, NULL);
380379dfc695SMike Kravetz 	if (demote_order >= h->order)
380479dfc695SMike Kravetz 		return -EINVAL;
380579dfc695SMike Kravetz 
380679dfc695SMike Kravetz 	/* resize_lock synchronizes access to demote size and writes */
380779dfc695SMike Kravetz 	mutex_lock(&h->resize_lock);
380879dfc695SMike Kravetz 	h->demote_order = demote_order;
380979dfc695SMike Kravetz 	mutex_unlock(&h->resize_lock);
381079dfc695SMike Kravetz 
381179dfc695SMike Kravetz 	return count;
381279dfc695SMike Kravetz }
381379dfc695SMike Kravetz HSTATE_ATTR(demote_size);
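/*
 * Usage sketch (illustrative; paths assumed from the sysfs layout built
 * below): demotion is driven entirely from sysfs, e.g. on x86_64
 *
 *	echo 2048kB > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote_size
 *	echo 1 > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote
 *
 * splits one free 1 GB page into 512 free 2 MB pages via demote_store()
 * and demote_pool_huge_page() above.
 */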
381479dfc695SMike Kravetz 
3815a3437870SNishanth Aravamudan static struct attribute *hstate_attrs[] = {
3816a3437870SNishanth Aravamudan 	&nr_hugepages_attr.attr,
3817a3437870SNishanth Aravamudan 	&nr_overcommit_hugepages_attr.attr,
3818a3437870SNishanth Aravamudan 	&free_hugepages_attr.attr,
3819a3437870SNishanth Aravamudan 	&resv_hugepages_attr.attr,
3820a3437870SNishanth Aravamudan 	&surplus_hugepages_attr.attr,
382106808b08SLee Schermerhorn #ifdef CONFIG_NUMA
382206808b08SLee Schermerhorn 	&nr_hugepages_mempolicy_attr.attr,
382306808b08SLee Schermerhorn #endif
3824a3437870SNishanth Aravamudan 	NULL,
3825a3437870SNishanth Aravamudan };
3826a3437870SNishanth Aravamudan 
382767e5ed96SArvind Yadav static const struct attribute_group hstate_attr_group = {
3828a3437870SNishanth Aravamudan 	.attrs = hstate_attrs,
3829a3437870SNishanth Aravamudan };
3830a3437870SNishanth Aravamudan 
383179dfc695SMike Kravetz static struct attribute *hstate_demote_attrs[] = {
383279dfc695SMike Kravetz 	&demote_size_attr.attr,
383379dfc695SMike Kravetz 	&demote_attr.attr,
383479dfc695SMike Kravetz 	NULL,
383579dfc695SMike Kravetz };
383679dfc695SMike Kravetz 
383779dfc695SMike Kravetz static const struct attribute_group hstate_demote_attr_group = {
383879dfc695SMike Kravetz 	.attrs = hstate_demote_attrs,
383979dfc695SMike Kravetz };
384079dfc695SMike Kravetz 
3841094e9539SJeff Mahoney static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
38429a305230SLee Schermerhorn 				    struct kobject **hstate_kobjs,
384367e5ed96SArvind Yadav 				    const struct attribute_group *hstate_attr_group)
3844a3437870SNishanth Aravamudan {
3845a3437870SNishanth Aravamudan 	int retval;
3846972dc4deSAneesh Kumar K.V 	int hi = hstate_index(h);
3847a3437870SNishanth Aravamudan 
38489a305230SLee Schermerhorn 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
38499a305230SLee Schermerhorn 	if (!hstate_kobjs[hi])
3850a3437870SNishanth Aravamudan 		return -ENOMEM;
3851a3437870SNishanth Aravamudan 
38529a305230SLee Schermerhorn 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
3853cc2205a6SMiaohe Lin 	if (retval) {
38549a305230SLee Schermerhorn 		kobject_put(hstate_kobjs[hi]);
3855cc2205a6SMiaohe Lin 		hstate_kobjs[hi] = NULL;
38563a6bdda0SMiaohe Lin 		return retval;
3857cc2205a6SMiaohe Lin 	}
3858a3437870SNishanth Aravamudan 
385979dfc695SMike Kravetz 	if (h->demote_order) {
386001088a60SMiaohe Lin 		retval = sysfs_create_group(hstate_kobjs[hi],
386101088a60SMiaohe Lin 					    &hstate_demote_attr_group);
386201088a60SMiaohe Lin 		if (retval) {
386379dfc695SMike Kravetz 			pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
386401088a60SMiaohe Lin 			sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group);
386501088a60SMiaohe Lin 			kobject_put(hstate_kobjs[hi]);
386601088a60SMiaohe Lin 			hstate_kobjs[hi] = NULL;
386701088a60SMiaohe Lin 			return retval;
386801088a60SMiaohe Lin 		}
386979dfc695SMike Kravetz 	}
387079dfc695SMike Kravetz 
387101088a60SMiaohe Lin 	return 0;
3872a3437870SNishanth Aravamudan }
3873a3437870SNishanth Aravamudan 
38749a305230SLee Schermerhorn #ifdef CONFIG_NUMA
3875a4a00b45SMuchun Song static bool hugetlb_sysfs_initialized __ro_after_init;
38769a305230SLee Schermerhorn 
38779a305230SLee Schermerhorn /*
38789a305230SLee Schermerhorn  * node_hstate/s - associate per node hstate attributes, via their kobjects,
387910fbcf4cSKay Sievers  * with node devices in node_devices[] using a parallel array.  The array
388010fbcf4cSKay Sievers  * index of a node device or _hstate == node id.
388110fbcf4cSKay Sievers  * This is here to avoid any static dependency of the node device driver, in
38829a305230SLee Schermerhorn  * the base kernel, on the hugetlb module.
38839a305230SLee Schermerhorn  */
38849a305230SLee Schermerhorn struct node_hstate {
38859a305230SLee Schermerhorn 	struct kobject		*hugepages_kobj;
38869a305230SLee Schermerhorn 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
38879a305230SLee Schermerhorn };
3888b4e289a6SAlexander Kuleshov static struct node_hstate node_hstates[MAX_NUMNODES];
38899a305230SLee Schermerhorn 
38909a305230SLee Schermerhorn /*
389110fbcf4cSKay Sievers  * A subset of global hstate attributes for node devices
38929a305230SLee Schermerhorn  */
38939a305230SLee Schermerhorn static struct attribute *per_node_hstate_attrs[] = {
38949a305230SLee Schermerhorn 	&nr_hugepages_attr.attr,
38959a305230SLee Schermerhorn 	&free_hugepages_attr.attr,
38969a305230SLee Schermerhorn 	&surplus_hugepages_attr.attr,
38979a305230SLee Schermerhorn 	NULL,
38989a305230SLee Schermerhorn };
38999a305230SLee Schermerhorn 
390067e5ed96SArvind Yadav static const struct attribute_group per_node_hstate_attr_group = {
39019a305230SLee Schermerhorn 	.attrs = per_node_hstate_attrs,
39029a305230SLee Schermerhorn };
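/*
 * The resulting per-node layout (illustrative path, assuming a 2 MB
 * hstate) mirrors the global one, e.g.
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *
 * exposing only the read-write nr_hugepages plus the read-only
 * free_hugepages and surplus_hugepages counters per node.
 */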
39039a305230SLee Schermerhorn 
39049a305230SLee Schermerhorn /*
390510fbcf4cSKay Sievers  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
39069a305230SLee Schermerhorn  * Returns node id via non-NULL nidp.
39079a305230SLee Schermerhorn  */
39089a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
39099a305230SLee Schermerhorn {
39109a305230SLee Schermerhorn 	int nid;
39119a305230SLee Schermerhorn 
39129a305230SLee Schermerhorn 	for (nid = 0; nid < nr_node_ids; nid++) {
39139a305230SLee Schermerhorn 		struct node_hstate *nhs = &node_hstates[nid];
39149a305230SLee Schermerhorn 		int i;
39159a305230SLee Schermerhorn 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
39169a305230SLee Schermerhorn 			if (nhs->hstate_kobjs[i] == kobj) {
39179a305230SLee Schermerhorn 				if (nidp)
39189a305230SLee Schermerhorn 					*nidp = nid;
39199a305230SLee Schermerhorn 				return &hstates[i];
39209a305230SLee Schermerhorn 			}
39219a305230SLee Schermerhorn 	}
39229a305230SLee Schermerhorn 
39239a305230SLee Schermerhorn 	BUG();
39249a305230SLee Schermerhorn 	return NULL;
39259a305230SLee Schermerhorn }
39269a305230SLee Schermerhorn 
39279a305230SLee Schermerhorn /*
392810fbcf4cSKay Sievers  * Unregister hstate attributes from a single node device.
39299a305230SLee Schermerhorn  * No-op if no hstate attributes attached.
39309a305230SLee Schermerhorn  */
3931a4a00b45SMuchun Song void hugetlb_unregister_node(struct node *node)
39329a305230SLee Schermerhorn {
39339a305230SLee Schermerhorn 	struct hstate *h;
393410fbcf4cSKay Sievers 	struct node_hstate *nhs = &node_hstates[node->dev.id];
39359a305230SLee Schermerhorn 
39369a305230SLee Schermerhorn 	if (!nhs->hugepages_kobj)
39379b5e5d0fSLee Schermerhorn 		return;		/* no hstate attributes */
39389a305230SLee Schermerhorn 
3939972dc4deSAneesh Kumar K.V 	for_each_hstate(h) {
3940972dc4deSAneesh Kumar K.V 		int idx = hstate_index(h);
394101088a60SMiaohe Lin 		struct kobject *hstate_kobj = nhs->hstate_kobjs[idx];
394201088a60SMiaohe Lin 
394301088a60SMiaohe Lin 		if (!hstate_kobj)
394401088a60SMiaohe Lin 			continue;
394501088a60SMiaohe Lin 		if (h->demote_order)
394601088a60SMiaohe Lin 			sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group);
394701088a60SMiaohe Lin 		sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group);
394801088a60SMiaohe Lin 		kobject_put(hstate_kobj);
3949972dc4deSAneesh Kumar K.V 		nhs->hstate_kobjs[idx] = NULL;
3950972dc4deSAneesh Kumar K.V 	}
39519a305230SLee Schermerhorn 
39529a305230SLee Schermerhorn 	kobject_put(nhs->hugepages_kobj);
39539a305230SLee Schermerhorn 	nhs->hugepages_kobj = NULL;
39549a305230SLee Schermerhorn }
39559a305230SLee Schermerhorn 
39569a305230SLee Schermerhorn 
39579a305230SLee Schermerhorn /*
395810fbcf4cSKay Sievers  * Register hstate attributes for a single node device.
39599a305230SLee Schermerhorn  * No-op if attributes already registered.
39609a305230SLee Schermerhorn  */
3961a4a00b45SMuchun Song void hugetlb_register_node(struct node *node)
39629a305230SLee Schermerhorn {
39639a305230SLee Schermerhorn 	struct hstate *h;
396410fbcf4cSKay Sievers 	struct node_hstate *nhs = &node_hstates[node->dev.id];
39659a305230SLee Schermerhorn 	int err;
39669a305230SLee Schermerhorn 
3967a4a00b45SMuchun Song 	if (!hugetlb_sysfs_initialized)
3968a4a00b45SMuchun Song 		return;
3969a4a00b45SMuchun Song 
39709a305230SLee Schermerhorn 	if (nhs->hugepages_kobj)
39719a305230SLee Schermerhorn 		return;		/* already allocated */
39729a305230SLee Schermerhorn 
39739a305230SLee Schermerhorn 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
397410fbcf4cSKay Sievers 							&node->dev.kobj);
39759a305230SLee Schermerhorn 	if (!nhs->hugepages_kobj)
39769a305230SLee Schermerhorn 		return;
39779a305230SLee Schermerhorn 
39789a305230SLee Schermerhorn 	for_each_hstate(h) {
39799a305230SLee Schermerhorn 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
39809a305230SLee Schermerhorn 						nhs->hstate_kobjs,
39819a305230SLee Schermerhorn 						&per_node_hstate_attr_group);
39829a305230SLee Schermerhorn 		if (err) {
3983282f4214SMike Kravetz 			pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
398410fbcf4cSKay Sievers 				h->name, node->dev.id);
39859a305230SLee Schermerhorn 			hugetlb_unregister_node(node);
39869a305230SLee Schermerhorn 			break;
39879a305230SLee Schermerhorn 		}
39889a305230SLee Schermerhorn 	}
39899a305230SLee Schermerhorn }
39909a305230SLee Schermerhorn 
39919a305230SLee Schermerhorn /*
39929b5e5d0fSLee Schermerhorn  * hugetlb init time:  register hstate attributes for all registered node
399310fbcf4cSKay Sievers  * devices of nodes that have memory.  All on-line nodes should have
399410fbcf4cSKay Sievers  * registered their associated device by this time.
39959a305230SLee Schermerhorn  */
39967d9ca000SLuiz Capitulino static void __init hugetlb_register_all_nodes(void)
39979a305230SLee Schermerhorn {
39989a305230SLee Schermerhorn 	int nid;
39999a305230SLee Schermerhorn 
4000a4a00b45SMuchun Song 	for_each_online_node(nid)
4001b958d4d0SMuchun Song 		hugetlb_register_node(node_devices[nid]);
40029a305230SLee Schermerhorn }
40039a305230SLee Schermerhorn #else	/* !CONFIG_NUMA */
40049a305230SLee Schermerhorn 
40059a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
40069a305230SLee Schermerhorn {
40079a305230SLee Schermerhorn 	BUG();
40089a305230SLee Schermerhorn 	if (nidp)
40099a305230SLee Schermerhorn 		*nidp = -1;
40109a305230SLee Schermerhorn 	return NULL;
40119a305230SLee Schermerhorn }
40129a305230SLee Schermerhorn 
40139a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) { }
40149a305230SLee Schermerhorn 
40159a305230SLee Schermerhorn #endif
40169a305230SLee Schermerhorn 
4017263b8998SMiaohe Lin #ifdef CONFIG_CMA
4018263b8998SMiaohe Lin static void __init hugetlb_cma_check(void);
4019263b8998SMiaohe Lin #else
4020263b8998SMiaohe Lin static inline __init void hugetlb_cma_check(void)
4021263b8998SMiaohe Lin {
4022263b8998SMiaohe Lin }
4023263b8998SMiaohe Lin #endif
4024263b8998SMiaohe Lin 
4025a4a00b45SMuchun Song static void __init hugetlb_sysfs_init(void)
4026a4a00b45SMuchun Song {
4027a4a00b45SMuchun Song 	struct hstate *h;
4028a4a00b45SMuchun Song 	int err;
4029a4a00b45SMuchun Song 
4030a4a00b45SMuchun Song 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
4031a4a00b45SMuchun Song 	if (!hugepages_kobj)
4032a4a00b45SMuchun Song 		return;
4033a4a00b45SMuchun Song 
4034a4a00b45SMuchun Song 	for_each_hstate(h) {
4035a4a00b45SMuchun Song 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
4036a4a00b45SMuchun Song 					 hstate_kobjs, &hstate_attr_group);
4037a4a00b45SMuchun Song 		if (err)
4038a4a00b45SMuchun Song 			pr_err("HugeTLB: Unable to add hstate %s", h->name);
4039a4a00b45SMuchun Song 	}
4040a4a00b45SMuchun Song 
4041a4a00b45SMuchun Song #ifdef CONFIG_NUMA
4042a4a00b45SMuchun Song 	hugetlb_sysfs_initialized = true;
4043a4a00b45SMuchun Song #endif
4044a4a00b45SMuchun Song 	hugetlb_register_all_nodes();
4045a4a00b45SMuchun Song }
4046a4a00b45SMuchun Song 
4047a3437870SNishanth Aravamudan static int __init hugetlb_init(void)
4048a3437870SNishanth Aravamudan {
40498382d914SDavidlohr Bueso 	int i;
40508382d914SDavidlohr Bueso 
4051d6995da3SMike Kravetz 	BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4052d6995da3SMike Kravetz 			__NR_HPAGEFLAGS);
4053d6995da3SMike Kravetz 
4054c2833a5bSMike Kravetz 	if (!hugepages_supported()) {
4055c2833a5bSMike Kravetz 		if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4056c2833a5bSMike Kravetz 			pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
40570ef89d25SBenjamin Herrenschmidt 		return 0;
4058d715cf80SLiam R. Howlett 	}
4059d715cf80SLiam R. Howlett 
4060282f4214SMike Kravetz 	/*
4061282f4214SMike Kravetz 	 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
4062282f4214SMike Kravetz 	 * architectures depend on setup being done here.
4063282f4214SMike Kravetz 	 */
4064a3437870SNishanth Aravamudan 	hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4065282f4214SMike Kravetz 	if (!parsed_default_hugepagesz) {
4066282f4214SMike Kravetz 		/*
4067282f4214SMike Kravetz 		 * If we did not parse a default huge page size, set
4068282f4214SMike Kravetz 		 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4069282f4214SMike Kravetz 		 * number of huge pages for this default size was implicitly
4070282f4214SMike Kravetz 		 * specified, set that here as well.
4071282f4214SMike Kravetz 		 * Note that the implicit setting will overwrite an explicit
4072282f4214SMike Kravetz 		 * setting.  A warning will be printed in this case.
4073282f4214SMike Kravetz 		 */
4074282f4214SMike Kravetz 		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4075f8b74815SVaishali Thakkar 		if (default_hstate_max_huge_pages) {
4076282f4214SMike Kravetz 			if (default_hstate.max_huge_pages) {
4077282f4214SMike Kravetz 				char buf[32];
4078282f4214SMike Kravetz 
4079282f4214SMike Kravetz 				string_get_size(huge_page_size(&default_hstate),
4080282f4214SMike Kravetz 					1, STRING_UNITS_2, buf, 32);
4081282f4214SMike Kravetz 				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4082282f4214SMike Kravetz 					default_hstate.max_huge_pages, buf);
4083282f4214SMike Kravetz 				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4084282f4214SMike Kravetz 					default_hstate_max_huge_pages);
4085282f4214SMike Kravetz 			}
4086282f4214SMike Kravetz 			default_hstate.max_huge_pages =
4087282f4214SMike Kravetz 				default_hstate_max_huge_pages;
4088b5389086SZhenguo Yao 
40890a7a0f6fSPeng Liu 			for_each_online_node(i)
4090b5389086SZhenguo Yao 				default_hstate.max_huge_pages_node[i] =
4091b5389086SZhenguo Yao 					default_hugepages_in_node[i];
4092282f4214SMike Kravetz 		}
4093f8b74815SVaishali Thakkar 	}
4094a3437870SNishanth Aravamudan 
4095cf11e85fSRoman Gushchin 	hugetlb_cma_check();
4096a3437870SNishanth Aravamudan 	hugetlb_init_hstates();
4097aa888a74SAndi Kleen 	gather_bootmem_prealloc();
4098a3437870SNishanth Aravamudan 	report_hugepages();
4099a3437870SNishanth Aravamudan 
4100a3437870SNishanth Aravamudan 	hugetlb_sysfs_init();
41017179e7bfSJianguo Wu 	hugetlb_cgroup_file_init();
41029a305230SLee Schermerhorn 
41038382d914SDavidlohr Bueso #ifdef CONFIG_SMP
41048382d914SDavidlohr Bueso 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
41058382d914SDavidlohr Bueso #else
41068382d914SDavidlohr Bueso 	num_fault_mutexes = 1;
41078382d914SDavidlohr Bueso #endif
4108c672c7f2SMike Kravetz 	hugetlb_fault_mutex_table =
41096da2ec56SKees Cook 		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
41106da2ec56SKees Cook 			      GFP_KERNEL);
4111c672c7f2SMike Kravetz 	BUG_ON(!hugetlb_fault_mutex_table);
41128382d914SDavidlohr Bueso 
41138382d914SDavidlohr Bueso 	for (i = 0; i < num_fault_mutexes; i++)
4114c672c7f2SMike Kravetz 		mutex_init(&hugetlb_fault_mutex_table[i]);
4115a3437870SNishanth Aravamudan 	return 0;
4116a3437870SNishanth Aravamudan }
41173e89e1c5SPaul Gortmaker subsys_initcall(hugetlb_init);
4118a3437870SNishanth Aravamudan 
4119ae94da89SMike Kravetz /* Overwritten by architectures with more huge page sizes */
4120ae94da89SMike Kravetz bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
41219fee021dSVaishali Thakkar {
4122ae94da89SMike Kravetz 	return size == HPAGE_SIZE;
41239fee021dSVaishali Thakkar }
41249fee021dSVaishali Thakkar 
4125d00181b9SKirill A. Shutemov void __init hugetlb_add_hstate(unsigned int order)
4126a3437870SNishanth Aravamudan {
4127a3437870SNishanth Aravamudan 	struct hstate *h;
41288faa8b07SAndi Kleen 	unsigned long i;
41298faa8b07SAndi Kleen 
4130a3437870SNishanth Aravamudan 	if (size_to_hstate(PAGE_SIZE << order)) {
4131a3437870SNishanth Aravamudan 		return;
4132a3437870SNishanth Aravamudan 	}
413347d38344SAneesh Kumar K.V 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4134a3437870SNishanth Aravamudan 	BUG_ON(order == 0);
413547d38344SAneesh Kumar K.V 	h = &hstates[hugetlb_max_hstate++];
413629383967SMike Kravetz 	mutex_init(&h->resize_lock);
4137a3437870SNishanth Aravamudan 	h->order = order;
4138aca78307SMiaohe Lin 	h->mask = ~(huge_page_size(h) - 1);
41398faa8b07SAndi Kleen 	for (i = 0; i < MAX_NUMNODES; ++i)
41408faa8b07SAndi Kleen 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
41410edaecfaSAneesh Kumar K.V 	INIT_LIST_HEAD(&h->hugepage_activelist);
414254f18d35SAndrew Morton 	h->next_nid_to_alloc = first_memory_node;
414354f18d35SAndrew Morton 	h->next_nid_to_free = first_memory_node;
4144a3437870SNishanth Aravamudan 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4145c2c3a60aSMiaohe Lin 					huge_page_size(h)/SZ_1K);
41468faa8b07SAndi Kleen 
4147a3437870SNishanth Aravamudan 	parsed_hstate = h;
4148a3437870SNishanth Aravamudan }
4149a3437870SNishanth Aravamudan 
4150b5389086SZhenguo Yao bool __init __weak hugetlb_node_alloc_supported(void)
4151b5389086SZhenguo Yao {
4152b5389086SZhenguo Yao 	return true;
4153b5389086SZhenguo Yao }
4154f87442f4SPeng Liu 
4155f87442f4SPeng Liu static void __init hugepages_clear_pages_in_node(void)
4156f87442f4SPeng Liu {
4157f87442f4SPeng Liu 	if (!hugetlb_max_hstate) {
4158f87442f4SPeng Liu 		default_hstate_max_huge_pages = 0;
4159f87442f4SPeng Liu 		memset(default_hugepages_in_node, 0,
416010395680SMiaohe Lin 			sizeof(default_hugepages_in_node));
4161f87442f4SPeng Liu 	} else {
4162f87442f4SPeng Liu 		parsed_hstate->max_huge_pages = 0;
4163f87442f4SPeng Liu 		memset(parsed_hstate->max_huge_pages_node, 0,
416410395680SMiaohe Lin 			sizeof(parsed_hstate->max_huge_pages_node));
4165f87442f4SPeng Liu 	}
4166f87442f4SPeng Liu }
4167f87442f4SPeng Liu 
4168282f4214SMike Kravetz /*
4169282f4214SMike Kravetz  * hugepages command line processing
4170282f4214SMike Kravetz  * hugepages normally follows a valid hugepagesz or default_hugepagesz
4171282f4214SMike Kravetz  * specification.  If not, ignore the hugepages value.  hugepages can also
4172282f4214SMike Kravetz  * be the first huge page command line option, in which case it implicitly
4173282f4214SMike Kravetz  * specifies the number of huge pages for the default size.
4174282f4214SMike Kravetz  */
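/*
 * Illustrative command lines handled by hugepages_setup() below (the
 * "node:count" form additionally requires hugetlb_node_alloc_supported()):
 *
 *	hugepages=1024
 *	hugepagesz=1G hugepages=2
 *	hugepagesz=2M hugepages=0:256,1:256
 */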
4175282f4214SMike Kravetz static int __init hugepages_setup(char *s)
4176a3437870SNishanth Aravamudan {
4177a3437870SNishanth Aravamudan 	unsigned long *mhp;
41788faa8b07SAndi Kleen 	static unsigned long *last_mhp;
4179b5389086SZhenguo Yao 	int node = NUMA_NO_NODE;
4180b5389086SZhenguo Yao 	int count;
4181b5389086SZhenguo Yao 	unsigned long tmp;
4182b5389086SZhenguo Yao 	char *p = s;
4183a3437870SNishanth Aravamudan 
41849fee021dSVaishali Thakkar 	if (!parsed_valid_hugepagesz) {
4185282f4214SMike Kravetz 		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
41869fee021dSVaishali Thakkar 		parsed_valid_hugepagesz = true;
4187f81f6e4bSPeng Liu 		return 1;
41889fee021dSVaishali Thakkar 	}
4189282f4214SMike Kravetz 
4190a3437870SNishanth Aravamudan 	/*
4191282f4214SMike Kravetz 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4192282f4214SMike Kravetz 	 * yet, so this hugepages= parameter goes to the "default hstate".
4193282f4214SMike Kravetz 	 * Otherwise, it goes with the previously parsed hugepagesz or
4194282f4214SMike Kravetz 	 * default_hugepagesz.
4195a3437870SNishanth Aravamudan 	 */
41969fee021dSVaishali Thakkar 	else if (!hugetlb_max_hstate)
4197a3437870SNishanth Aravamudan 		mhp = &default_hstate_max_huge_pages;
4198a3437870SNishanth Aravamudan 	else
4199a3437870SNishanth Aravamudan 		mhp = &parsed_hstate->max_huge_pages;
4200a3437870SNishanth Aravamudan 
42018faa8b07SAndi Kleen 	if (mhp == last_mhp) {
4202282f4214SMike Kravetz 		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4203f81f6e4bSPeng Liu 		return 1;
42048faa8b07SAndi Kleen 	}
42058faa8b07SAndi Kleen 
4206b5389086SZhenguo Yao 	while (*p) {
4207b5389086SZhenguo Yao 		count = 0;
4208b5389086SZhenguo Yao 		if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4209b5389086SZhenguo Yao 			goto invalid;
4210b5389086SZhenguo Yao 		/* Parameter is node format */
4211b5389086SZhenguo Yao 		if (p[count] == ':') {
4212b5389086SZhenguo Yao 			if (!hugetlb_node_alloc_supported()) {
4213b5389086SZhenguo Yao 				pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4214f81f6e4bSPeng Liu 				return 1;
4215b5389086SZhenguo Yao 			}
42160a7a0f6fSPeng Liu 			if (tmp >= MAX_NUMNODES || !node_online(tmp))
4217e79ce983SLiu Yuntao 				goto invalid;
42180a7a0f6fSPeng Liu 			node = array_index_nospec(tmp, MAX_NUMNODES);
4219b5389086SZhenguo Yao 			p += count + 1;
4220b5389086SZhenguo Yao 			/* Parse hugepages */
4221b5389086SZhenguo Yao 			if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4222b5389086SZhenguo Yao 				goto invalid;
4223b5389086SZhenguo Yao 			if (!hugetlb_max_hstate)
4224b5389086SZhenguo Yao 				default_hugepages_in_node[node] = tmp;
4225b5389086SZhenguo Yao 			else
4226b5389086SZhenguo Yao 				parsed_hstate->max_huge_pages_node[node] = tmp;
4227b5389086SZhenguo Yao 			*mhp += tmp;
4228b5389086SZhenguo Yao 			/* Go to parse next node */
4229b5389086SZhenguo Yao 			if (p[count] == ',')
4230b5389086SZhenguo Yao 				p += count + 1;
4231b5389086SZhenguo Yao 			else
4232b5389086SZhenguo Yao 				break;
4233b5389086SZhenguo Yao 		} else {
4234b5389086SZhenguo Yao 			if (p != s)
4235b5389086SZhenguo Yao 				goto invalid;
4236b5389086SZhenguo Yao 			*mhp = tmp;
4237b5389086SZhenguo Yao 			break;
4238b5389086SZhenguo Yao 		}
4239b5389086SZhenguo Yao 	}
4240a3437870SNishanth Aravamudan 
42418faa8b07SAndi Kleen 	/*
42428faa8b07SAndi Kleen 	 * Global state is always initialized later in hugetlb_init.
424304adbc3fSMiaohe Lin 	 * But we need to allocate pages for gigantic hstates here early to still
42448faa8b07SAndi Kleen 	 * use the bootmem allocator.
42458faa8b07SAndi Kleen 	 */
424604adbc3fSMiaohe Lin 	if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
42478faa8b07SAndi Kleen 		hugetlb_hstate_alloc_pages(parsed_hstate);
42488faa8b07SAndi Kleen 
42498faa8b07SAndi Kleen 	last_mhp = mhp;
42508faa8b07SAndi Kleen 
4251a3437870SNishanth Aravamudan 	return 1;
4252b5389086SZhenguo Yao 
4253b5389086SZhenguo Yao invalid:
4254b5389086SZhenguo Yao 	pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4255f87442f4SPeng Liu 	hugepages_clear_pages_in_node();
4256f81f6e4bSPeng Liu 	return 1;
4257a3437870SNishanth Aravamudan }
4258282f4214SMike Kravetz __setup("hugepages=", hugepages_setup);
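/*
 * Editorial illustration, not part of this file: a minimal user-space
 * sketch of the same sscanf()-based walk that hugepages_setup() uses for
 * the node format "node:count[,node:count,...]".  The 8-node array and
 * the sample string are assumptions chosen purely for demonstration.
 */
#include <stdio.h>

int main(void)
{
	const char *p = "0:256,1:512";	/* as in hugepages=0:256,1:512 */
	unsigned long per_node[8] = { 0 };
	unsigned long total = 0;
	unsigned long node, pages;
	int count;

	while (*p) {
		if (sscanf(p, "%lu%n", &node, &count) != 1 ||
		    p[count] != ':' || node >= 8)
			break;			/* malformed or out of range */
		p += count + 1;			/* skip "node:" */
		if (sscanf(p, "%lu%n", &pages, &count) != 1)
			break;
		per_node[node] = pages;
		total += pages;
		if (p[count] != ',')
			break;			/* last "node:count" pair */
		p += count + 1;			/* skip "count," */
	}
	printf("total=%lu node0=%lu node1=%lu\n", total, per_node[0], per_node[1]);
	return 0;
}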
4259e11bfbfcSNick Piggin 
4260282f4214SMike Kravetz /*
4261282f4214SMike Kravetz  * hugepagesz command line processing
4262282f4214SMike Kravetz  * A specific huge page size can only be specified once with hugepagesz.
4263282f4214SMike Kravetz  * hugepagesz is followed by hugepages on the command line.  The global
4264282f4214SMike Kravetz  * variable 'parsed_valid_hugepagesz' is used to determine if the prior
4265282f4214SMike Kravetz  * hugepagesz argument was valid.
4266282f4214SMike Kravetz  */
4267359f2544SMike Kravetz static int __init hugepagesz_setup(char *s)
4268e11bfbfcSNick Piggin {
4269359f2544SMike Kravetz 	unsigned long size;
4270282f4214SMike Kravetz 	struct hstate *h;
4271282f4214SMike Kravetz 
4272282f4214SMike Kravetz 	parsed_valid_hugepagesz = false;
4273359f2544SMike Kravetz 	size = (unsigned long)memparse(s, NULL);
4274359f2544SMike Kravetz 
4275359f2544SMike Kravetz 	if (!arch_hugetlb_valid_size(size)) {
4276282f4214SMike Kravetz 		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4277f81f6e4bSPeng Liu 		return 1;
4278359f2544SMike Kravetz 	}
4279359f2544SMike Kravetz 
4280282f4214SMike Kravetz 	h = size_to_hstate(size);
4281282f4214SMike Kravetz 	if (h) {
4282282f4214SMike Kravetz 		/*
4283282f4214SMike Kravetz 		 * hstate for this size already exists.  This is normally
4284282f4214SMike Kravetz 		 * an error, but is allowed if the existing hstate is the
4285282f4214SMike Kravetz 		 * default hstate.  More specifically, it is only allowed if
4286282f4214SMike Kravetz 		 * the number of huge pages for the default hstate was not
4287282f4214SMike Kravetz 		 * previously specified.
4288282f4214SMike Kravetz 		 */
4289282f4214SMike Kravetz 		if (!parsed_default_hugepagesz || h != &default_hstate ||
4290282f4214SMike Kravetz 		    default_hstate.max_huge_pages) {
4291282f4214SMike Kravetz 			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4292f81f6e4bSPeng Liu 			return 1;
429338237830SMike Kravetz 		}
429438237830SMike Kravetz 
4295282f4214SMike Kravetz 		/*
4296282f4214SMike Kravetz 		 * No need to call hugetlb_add_hstate() as hstate already
4297282f4214SMike Kravetz 		 * exists.  But, do set parsed_hstate so that a following
4298282f4214SMike Kravetz 		 * hugepages= parameter will be applied to this hstate.
4299282f4214SMike Kravetz 		 */
4300282f4214SMike Kravetz 		parsed_hstate = h;
4301282f4214SMike Kravetz 		parsed_valid_hugepagesz = true;
4302e11bfbfcSNick Piggin 		return 1;
4303e11bfbfcSNick Piggin 	}
4304282f4214SMike Kravetz 
4305359f2544SMike Kravetz 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4306282f4214SMike Kravetz 	parsed_valid_hugepagesz = true;
4307359f2544SMike Kravetz 	return 1;
4308359f2544SMike Kravetz }
4309359f2544SMike Kravetz __setup("hugepagesz=", hugepagesz_setup);
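/*
 * Editorial illustration, not part of this file: a user-space check of the
 * "ilog2(size) - PAGE_SHIFT" conversion performed by hugepagesz_setup()
 * above.  A 4 KiB base page (PAGE_SHIFT == 12) is assumed, and sizes are
 * assumed to be powers of two, as valid huge page sizes are.
 */
#include <stdio.h>

static unsigned int size_to_order(unsigned long size, unsigned int page_shift)
{
	unsigned int shift = 0;

	while ((1UL << shift) < size)
		shift++;		/* shift == ilog2(size) for powers of two */
	return shift - page_shift;
}

int main(void)
{
	printf("hugepagesz=2M -> order %u\n", size_to_order(2UL << 20, 12)); /* 9  */
	printf("hugepagesz=1G -> order %u\n", size_to_order(1UL << 30, 12)); /* 18 */
	return 0;
}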
4310359f2544SMike Kravetz 
4311282f4214SMike Kravetz /*
4312282f4214SMike Kravetz  * default_hugepagesz command line input
4313282f4214SMike Kravetz  * Only one instance of default_hugepagesz allowed on command line.
4314282f4214SMike Kravetz  */
4315ae94da89SMike Kravetz static int __init default_hugepagesz_setup(char *s)
4316e11bfbfcSNick Piggin {
4317ae94da89SMike Kravetz 	unsigned long size;
4318b5389086SZhenguo Yao 	int i;
4319ae94da89SMike Kravetz 
4320282f4214SMike Kravetz 	parsed_valid_hugepagesz = false;
4321282f4214SMike Kravetz 	if (parsed_default_hugepagesz) {
4322282f4214SMike Kravetz 		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4323f81f6e4bSPeng Liu 		return 1;
4324282f4214SMike Kravetz 	}
4325282f4214SMike Kravetz 
4326282f4214SMike Kravetz 	size = (unsigned long)memparse(s, NULL);
4327282f4214SMike Kravetz 
4328282f4214SMike Kravetz 	if (!arch_hugetlb_valid_size(size)) {
4329282f4214SMike Kravetz 		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4330f81f6e4bSPeng Liu 		return 1;
4331282f4214SMike Kravetz 	}
4332282f4214SMike Kravetz 
4333282f4214SMike Kravetz 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4334282f4214SMike Kravetz 	parsed_valid_hugepagesz = true;
4335282f4214SMike Kravetz 	parsed_default_hugepagesz = true;
4336282f4214SMike Kravetz 	default_hstate_idx = hstate_index(size_to_hstate(size));
4337282f4214SMike Kravetz 
4338282f4214SMike Kravetz 	/*
4339282f4214SMike Kravetz 	 * The number of default huge pages (for this size) could have been
4340282f4214SMike Kravetz 	 * specified as the first hugetlb parameter: hugepages=X.  If so,
4341282f4214SMike Kravetz 	 * then default_hstate_max_huge_pages is set.  If the default huge
4342282f4214SMike Kravetz 	 * page size is gigantic (order >= MAX_ORDER), then the pages must be
4343282f4214SMike Kravetz 	 * allocated here from bootmem allocator.
4344282f4214SMike Kravetz 	 */
4345282f4214SMike Kravetz 	if (default_hstate_max_huge_pages) {
4346282f4214SMike Kravetz 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
43470a7a0f6fSPeng Liu 		for_each_online_node(i)
4348b5389086SZhenguo Yao 			default_hstate.max_huge_pages_node[i] =
4349b5389086SZhenguo Yao 				default_hugepages_in_node[i];
4350282f4214SMike Kravetz 		if (hstate_is_gigantic(&default_hstate))
4351282f4214SMike Kravetz 			hugetlb_hstate_alloc_pages(&default_hstate);
4352282f4214SMike Kravetz 		default_hstate_max_huge_pages = 0;
4353282f4214SMike Kravetz 	}
4354282f4214SMike Kravetz 
4355e11bfbfcSNick Piggin 	return 1;
4356e11bfbfcSNick Piggin }
4357ae94da89SMike Kravetz __setup("default_hugepagesz=", default_hugepagesz_setup);
4358a3437870SNishanth Aravamudan 
4359d2226ebdSFeng Tang static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
4360d2226ebdSFeng Tang {
4361d2226ebdSFeng Tang #ifdef CONFIG_NUMA
4362d2226ebdSFeng Tang 	struct mempolicy *mpol = get_task_policy(current);
4363d2226ebdSFeng Tang 
4364d2226ebdSFeng Tang 	/*
4365d2226ebdSFeng Tang 	 * Only enforce MPOL_BIND policy which overlaps with cpuset policy
4366d2226ebdSFeng Tang 	 * (from policy_nodemask) specifically for hugetlb case
4367d2226ebdSFeng Tang 	 */
4368d2226ebdSFeng Tang 	if (mpol->mode == MPOL_BIND &&
4369d2226ebdSFeng Tang 		(apply_policy_zone(mpol, gfp_zone(gfp)) &&
4370d2226ebdSFeng Tang 		 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
4371d2226ebdSFeng Tang 		return &mpol->nodes;
4372d2226ebdSFeng Tang #endif
4373d2226ebdSFeng Tang 	return NULL;
4374d2226ebdSFeng Tang }
4375d2226ebdSFeng Tang 
43768ca39e68SMuchun Song static unsigned int allowed_mems_nr(struct hstate *h)
43778a213460SNishanth Aravamudan {
43788a213460SNishanth Aravamudan 	int node;
43798a213460SNishanth Aravamudan 	unsigned int nr = 0;
4380d2226ebdSFeng Tang 	nodemask_t *mbind_nodemask;
43818ca39e68SMuchun Song 	unsigned int *array = h->free_huge_pages_node;
43828ca39e68SMuchun Song 	gfp_t gfp_mask = htlb_alloc_mask(h);
43838a213460SNishanth Aravamudan 
4384d2226ebdSFeng Tang 	mbind_nodemask = policy_mbind_nodemask(gfp_mask);
43858ca39e68SMuchun Song 	for_each_node_mask(node, cpuset_current_mems_allowed) {
4386d2226ebdSFeng Tang 		if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
43878a213460SNishanth Aravamudan 			nr += array[node];
43888ca39e68SMuchun Song 	}
43898a213460SNishanth Aravamudan 
43908a213460SNishanth Aravamudan 	return nr;
43918a213460SNishanth Aravamudan }
43928a213460SNishanth Aravamudan 
43938a213460SNishanth Aravamudan #ifdef CONFIG_SYSCTL
439417743798SMuchun Song static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
439517743798SMuchun Song 					  void *buffer, size_t *length,
439617743798SMuchun Song 					  loff_t *ppos, unsigned long *out)
439717743798SMuchun Song {
439817743798SMuchun Song 	struct ctl_table dup_table;
439917743798SMuchun Song 
440017743798SMuchun Song 	/*
440117743798SMuchun Song 	 * In order to avoid races with __do_proc_doulongvec_minmax(), we
440217743798SMuchun Song 	 * duplicate the @table and alter the duplicate instead.
440317743798SMuchun Song 	 */
440417743798SMuchun Song 	dup_table = *table;
440517743798SMuchun Song 	dup_table.data = out;
440617743798SMuchun Song 
440717743798SMuchun Song 	return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
440817743798SMuchun Song }
440917743798SMuchun Song 
441006808b08SLee Schermerhorn static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
441106808b08SLee Schermerhorn 			 struct ctl_table *table, int write,
441232927393SChristoph Hellwig 			 void *buffer, size_t *length, loff_t *ppos)
44131da177e4SLinus Torvalds {
4414e5ff2159SAndi Kleen 	struct hstate *h = &default_hstate;
4415238d3c13SDavid Rientjes 	unsigned long tmp = h->max_huge_pages;
441608d4a246SMichal Hocko 	int ret;
4417e5ff2159SAndi Kleen 
4418457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
441986613628SJan Stancek 		return -EOPNOTSUPP;
4420457c1b27SNishanth Aravamudan 
442117743798SMuchun Song 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
442217743798SMuchun Song 					     &tmp);
442308d4a246SMichal Hocko 	if (ret)
442408d4a246SMichal Hocko 		goto out;
4425e5ff2159SAndi Kleen 
4426238d3c13SDavid Rientjes 	if (write)
4427238d3c13SDavid Rientjes 		ret = __nr_hugepages_store_common(obey_mempolicy, h,
4428238d3c13SDavid Rientjes 						  NUMA_NO_NODE, tmp, *length);
442908d4a246SMichal Hocko out:
443008d4a246SMichal Hocko 	return ret;
44311da177e4SLinus Torvalds }
4432396faf03SMel Gorman 
443306808b08SLee Schermerhorn int hugetlb_sysctl_handler(struct ctl_table *table, int write,
443432927393SChristoph Hellwig 			  void *buffer, size_t *length, loff_t *ppos)
443506808b08SLee Schermerhorn {
443606808b08SLee Schermerhorn 
443706808b08SLee Schermerhorn 	return hugetlb_sysctl_handler_common(false, table, write,
443806808b08SLee Schermerhorn 							buffer, length, ppos);
443906808b08SLee Schermerhorn }
444006808b08SLee Schermerhorn 
444106808b08SLee Schermerhorn #ifdef CONFIG_NUMA
444206808b08SLee Schermerhorn int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
444332927393SChristoph Hellwig 			  void *buffer, size_t *length, loff_t *ppos)
444406808b08SLee Schermerhorn {
444506808b08SLee Schermerhorn 	return hugetlb_sysctl_handler_common(true, table, write,
444606808b08SLee Schermerhorn 							buffer, length, ppos);
444706808b08SLee Schermerhorn }
444806808b08SLee Schermerhorn #endif /* CONFIG_NUMA */
444906808b08SLee Schermerhorn 
4450a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write,
445132927393SChristoph Hellwig 		void *buffer, size_t *length, loff_t *ppos)
4452a3d0c6aaSNishanth Aravamudan {
4453a5516438SAndi Kleen 	struct hstate *h = &default_hstate;
4454e5ff2159SAndi Kleen 	unsigned long tmp;
445508d4a246SMichal Hocko 	int ret;
4456e5ff2159SAndi Kleen 
4457457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
445886613628SJan Stancek 		return -EOPNOTSUPP;
4459457c1b27SNishanth Aravamudan 
4460e5ff2159SAndi Kleen 	tmp = h->nr_overcommit_huge_pages;
4461e5ff2159SAndi Kleen 
4462bae7f4aeSLuiz Capitulino 	if (write && hstate_is_gigantic(h))
4463adbe8726SEric B Munson 		return -EINVAL;
4464adbe8726SEric B Munson 
446517743798SMuchun Song 	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
446617743798SMuchun Song 					     &tmp);
446708d4a246SMichal Hocko 	if (ret)
446808d4a246SMichal Hocko 		goto out;
4469e5ff2159SAndi Kleen 
4470e5ff2159SAndi Kleen 	if (write) {
4471db71ef79SMike Kravetz 		spin_lock_irq(&hugetlb_lock);
4472e5ff2159SAndi Kleen 		h->nr_overcommit_huge_pages = tmp;
4473db71ef79SMike Kravetz 		spin_unlock_irq(&hugetlb_lock);
4474e5ff2159SAndi Kleen 	}
447508d4a246SMichal Hocko out:
447608d4a246SMichal Hocko 	return ret;
4477a3d0c6aaSNishanth Aravamudan }
4478a3d0c6aaSNishanth Aravamudan 
44791da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */
44801da177e4SLinus Torvalds 
4481e1759c21SAlexey Dobriyan void hugetlb_report_meminfo(struct seq_file *m)
44821da177e4SLinus Torvalds {
4483fcb2b0c5SRoman Gushchin 	struct hstate *h;
4484fcb2b0c5SRoman Gushchin 	unsigned long total = 0;
4485fcb2b0c5SRoman Gushchin 
4486457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
4487457c1b27SNishanth Aravamudan 		return;
4488fcb2b0c5SRoman Gushchin 
4489fcb2b0c5SRoman Gushchin 	for_each_hstate(h) {
4490fcb2b0c5SRoman Gushchin 		unsigned long count = h->nr_huge_pages;
4491fcb2b0c5SRoman Gushchin 
4492aca78307SMiaohe Lin 		total += huge_page_size(h) * count;
4493fcb2b0c5SRoman Gushchin 
4494fcb2b0c5SRoman Gushchin 		if (h == &default_hstate)
4495e1759c21SAlexey Dobriyan 			seq_printf(m,
44961da177e4SLinus Torvalds 				   "HugePages_Total:   %5lu\n"
44971da177e4SLinus Torvalds 				   "HugePages_Free:    %5lu\n"
4498b45b5bd6SDavid Gibson 				   "HugePages_Rsvd:    %5lu\n"
44997893d1d5SAdam Litke 				   "HugePages_Surp:    %5lu\n"
45004f98a2feSRik van Riel 				   "Hugepagesize:   %8lu kB\n",
4501fcb2b0c5SRoman Gushchin 				   count,
4502a5516438SAndi Kleen 				   h->free_huge_pages,
4503a5516438SAndi Kleen 				   h->resv_huge_pages,
4504a5516438SAndi Kleen 				   h->surplus_huge_pages,
4505aca78307SMiaohe Lin 				   huge_page_size(h) / SZ_1K);
4506fcb2b0c5SRoman Gushchin 	}
4507fcb2b0c5SRoman Gushchin 
4508aca78307SMiaohe Lin 	seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
45091da177e4SLinus Torvalds }
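/*
 * Editorial illustration, not part of this file: a hypothetical user-space
 * reader for the /proc/meminfo fields emitted by hugetlb_report_meminfo()
 * above (HugePages_Total, HugePages_Free, HugePages_Rsvd, HugePages_Surp,
 * Hugepagesize and Hugetlb).
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "HugePages_", 10) ||
		    !strncmp(line, "Hugepagesize:", 13) ||
		    !strncmp(line, "Hugetlb:", 8))
			fputs(line, stdout);	/* echo only the hugetlb lines */
	}
	fclose(f);
	return 0;
}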
45101da177e4SLinus Torvalds 
45117981593bSJoe Perches int hugetlb_report_node_meminfo(char *buf, int len, int nid)
45121da177e4SLinus Torvalds {
4513a5516438SAndi Kleen 	struct hstate *h = &default_hstate;
45147981593bSJoe Perches 
4515457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
4516457c1b27SNishanth Aravamudan 		return 0;
45177981593bSJoe Perches 
45187981593bSJoe Perches 	return sysfs_emit_at(buf, len,
45191da177e4SLinus Torvalds 			     "Node %d HugePages_Total: %5u\n"
4520a1de0919SNishanth Aravamudan 			     "Node %d HugePages_Free:  %5u\n"
4521a1de0919SNishanth Aravamudan 			     "Node %d HugePages_Surp:  %5u\n",
4522a5516438SAndi Kleen 			     nid, h->nr_huge_pages_node[nid],
4523a5516438SAndi Kleen 			     nid, h->free_huge_pages_node[nid],
4524a5516438SAndi Kleen 			     nid, h->surplus_huge_pages_node[nid]);
45251da177e4SLinus Torvalds }
45261da177e4SLinus Torvalds 
4527dcadcf1cSGang Li void hugetlb_show_meminfo_node(int nid)
4528949f7ec5SDavid Rientjes {
4529949f7ec5SDavid Rientjes 	struct hstate *h;
4530949f7ec5SDavid Rientjes 
4531457c1b27SNishanth Aravamudan 	if (!hugepages_supported())
4532457c1b27SNishanth Aravamudan 		return;
4533457c1b27SNishanth Aravamudan 
4534949f7ec5SDavid Rientjes 	for_each_hstate(h)
4535dcadcf1cSGang Li 		printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4536949f7ec5SDavid Rientjes 			nid,
4537949f7ec5SDavid Rientjes 			h->nr_huge_pages_node[nid],
4538949f7ec5SDavid Rientjes 			h->free_huge_pages_node[nid],
4539949f7ec5SDavid Rientjes 			h->surplus_huge_pages_node[nid],
4540aca78307SMiaohe Lin 			huge_page_size(h) / SZ_1K);
4541949f7ec5SDavid Rientjes }
4542949f7ec5SDavid Rientjes 
45435d317b2bSNaoya Horiguchi void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
45445d317b2bSNaoya Horiguchi {
45455d317b2bSNaoya Horiguchi 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
45465d317b2bSNaoya Horiguchi 		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
45475d317b2bSNaoya Horiguchi }
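/*
 * Editorial illustration, not part of this file, of the
 * "usage << (PAGE_SHIFT - 10)" conversion used just above: assuming 4 KiB
 * base pages (PAGE_SHIFT == 12), each page is 4 kB, so shifting the page
 * count left by 2 yields kilobytes.
 */
#include <stdio.h>

int main(void)
{
	unsigned long hugetlb_usage = 512;	/* hypothetical page count */
	unsigned int page_shift = 12;

	/* 512 pages * 4 kB/page = 2048 kB */
	printf("HugetlbPages:\t%8lu kB\n", hugetlb_usage << (page_shift - 10));
	return 0;
}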
45485d317b2bSNaoya Horiguchi 
45491da177e4SLinus Torvalds /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
45501da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void)
45511da177e4SLinus Torvalds {
4552d0028588SWanpeng Li 	struct hstate *h;
4553d0028588SWanpeng Li 	unsigned long nr_total_pages = 0;
4554d0028588SWanpeng Li 
4555d0028588SWanpeng Li 	for_each_hstate(h)
4556d0028588SWanpeng Li 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4557d0028588SWanpeng Li 	return nr_total_pages;
45581da177e4SLinus Torvalds }
45591da177e4SLinus Torvalds 
4560a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta)
4561fc1b8a73SMel Gorman {
4562fc1b8a73SMel Gorman 	int ret = -ENOMEM;
4563fc1b8a73SMel Gorman 
45640aa7f354SMiaohe Lin 	if (!delta)
45650aa7f354SMiaohe Lin 		return 0;
45660aa7f354SMiaohe Lin 
4567db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
4568fc1b8a73SMel Gorman 	/*
4569fc1b8a73SMel Gorman 	 * When cpuset is configured, it breaks the strict hugetlb page
4570fc1b8a73SMel Gorman 	 * reservation as the accounting is done on a global variable. Such
4571fc1b8a73SMel Gorman 	 * reservation is completely rubbish in the presence of cpuset because
4572fc1b8a73SMel Gorman 	 * the reservation is not checked against page availability for the
4573fc1b8a73SMel Gorman 	 * current cpuset. An application can still potentially be OOM'ed by
4574fc1b8a73SMel Gorman 	 * the kernel for lack of free hugetlb pages in the cpuset the task
4575fc1b8a73SMel Gorman 	 * is in. Attempting to enforce strict accounting with cpuset is
4576fc1b8a73SMel Gorman 	 * almost impossible (or too ugly) because cpuset is so fluid that
4577fc1b8a73SMel Gorman 	 * tasks and memory nodes can be dynamically moved between cpusets.
4578fc1b8a73SMel Gorman 	 *
4579fc1b8a73SMel Gorman 	 * The change of semantics for shared hugetlb mapping with cpuset is
4580fc1b8a73SMel Gorman 	 * undesirable. However, in order to preserve some of the semantics,
4581fc1b8a73SMel Gorman 	 * we fall back to check against current free page availability as
4582fc1b8a73SMel Gorman 	 * a best attempt and hopefully to minimize the impact of changing
4583fc1b8a73SMel Gorman 	 * semantics that cpuset has.
45848ca39e68SMuchun Song 	 *
45858ca39e68SMuchun Song 	 * Apart from cpuset, we also have the memory policy mechanism,
45868ca39e68SMuchun Song 	 * which also determines from which node the kernel will allocate
45878ca39e68SMuchun Song 	 * memory in a NUMA system. So, similar to cpuset, we should also
45888ca39e68SMuchun Song 	 * consider the memory policy of the current task, as described
45898ca39e68SMuchun Song 	 * above.
4590fc1b8a73SMel Gorman 	 */
4591fc1b8a73SMel Gorman 	if (delta > 0) {
4592a5516438SAndi Kleen 		if (gather_surplus_pages(h, delta) < 0)
4593fc1b8a73SMel Gorman 			goto out;
4594fc1b8a73SMel Gorman 
45958ca39e68SMuchun Song 		if (delta > allowed_mems_nr(h)) {
4596a5516438SAndi Kleen 			return_unused_surplus_pages(h, delta);
4597fc1b8a73SMel Gorman 			goto out;
4598fc1b8a73SMel Gorman 		}
4599fc1b8a73SMel Gorman 	}
4600fc1b8a73SMel Gorman 
4601fc1b8a73SMel Gorman 	ret = 0;
4602fc1b8a73SMel Gorman 	if (delta < 0)
4603a5516438SAndi Kleen 		return_unused_surplus_pages(h, (unsigned long) -delta);
4604fc1b8a73SMel Gorman 
4605fc1b8a73SMel Gorman out:
4606db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
4607fc1b8a73SMel Gorman 	return ret;
4608fc1b8a73SMel Gorman }
4609fc1b8a73SMel Gorman 
461084afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma)
461184afd99bSAndy Whitcroft {
4612f522c3acSJoonsoo Kim 	struct resv_map *resv = vma_resv_map(vma);
461384afd99bSAndy Whitcroft 
461484afd99bSAndy Whitcroft 	/*
461584afd99bSAndy Whitcroft 	 * This new VMA should share its sibling's reservation map if present.
461684afd99bSAndy Whitcroft 	 * The VMA will only ever have a valid reservation map pointer where
461784afd99bSAndy Whitcroft 	 * it is being copied for another still existing VMA.  As that VMA
461825985edcSLucas De Marchi 	 * has a reference to the reservation map, it cannot disappear until
461984afd99bSAndy Whitcroft 	 * after this open call completes.  It is therefore safe to take a
462084afd99bSAndy Whitcroft 	 * new reference here without additional locking.
462184afd99bSAndy Whitcroft 	 */
462209a26e83SMike Kravetz 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
462309a26e83SMike Kravetz 		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4624f522c3acSJoonsoo Kim 		kref_get(&resv->refs);
462584afd99bSAndy Whitcroft 	}
462609a26e83SMike Kravetz }
462784afd99bSAndy Whitcroft 
4628a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma)
4629a1e78772SMel Gorman {
4630a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
4631f522c3acSJoonsoo Kim 	struct resv_map *resv = vma_resv_map(vma);
463290481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_vma(vma);
46334e35f483SJoonsoo Kim 	unsigned long reserve, start, end;
46341c5ecae3SMike Kravetz 	long gbl_reserve;
463584afd99bSAndy Whitcroft 
46364e35f483SJoonsoo Kim 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
46374e35f483SJoonsoo Kim 		return;
46384e35f483SJoonsoo Kim 
4639a5516438SAndi Kleen 	start = vma_hugecache_offset(h, vma, vma->vm_start);
4640a5516438SAndi Kleen 	end = vma_hugecache_offset(h, vma, vma->vm_end);
464184afd99bSAndy Whitcroft 
46424e35f483SJoonsoo Kim 	reserve = (end - start) - region_count(resv, start, end);
4643e9fe92aeSMina Almasry 	hugetlb_cgroup_uncharge_counter(resv, start, end);
46447251ff78SAdam Litke 	if (reserve) {
46451c5ecae3SMike Kravetz 		/*
46461c5ecae3SMike Kravetz 		 * Decrement reserve counts.  The global reserve count may be
46471c5ecae3SMike Kravetz 		 * adjusted if the subpool has a minimum size.
46481c5ecae3SMike Kravetz 		 */
46491c5ecae3SMike Kravetz 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
46501c5ecae3SMike Kravetz 		hugetlb_acct_memory(h, -gbl_reserve);
46517251ff78SAdam Litke 	}
4652e9fe92aeSMina Almasry 
4653e9fe92aeSMina Almasry 	kref_put(&resv->refs, resv_map_release);
4654a1e78772SMel Gorman }
4655a1e78772SMel Gorman 
465631383c68SDan Williams static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
465731383c68SDan Williams {
465831383c68SDan Williams 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
465931383c68SDan Williams 		return -EINVAL;
466031383c68SDan Williams 	return 0;
466131383c68SDan Williams }
466231383c68SDan Williams 
466305ea8860SDan Williams static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
466405ea8860SDan Williams {
4665aca78307SMiaohe Lin 	return huge_page_size(hstate_vma(vma));
466605ea8860SDan Williams }
466705ea8860SDan Williams 
46681da177e4SLinus Torvalds /*
46691da177e4SLinus Torvalds  * We cannot handle pagefaults against hugetlb pages at all.  They cause
46701da177e4SLinus Torvalds  * handle_mm_fault() to try to instantiate regular-sized pages in the
46716c26d310SMiaohe Lin  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
46721da177e4SLinus Torvalds  * this far.
46731da177e4SLinus Torvalds  */
4674b3ec9f33SSouptick Joarder static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
46751da177e4SLinus Torvalds {
46761da177e4SLinus Torvalds 	BUG();
4677d0217ac0SNick Piggin 	return 0;
46781da177e4SLinus Torvalds }
46791da177e4SLinus Torvalds 
4680eec3636aSJane Chu /*
4681eec3636aSJane Chu  * When a new function is introduced to vm_operations_struct and added
4682eec3636aSJane Chu  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4683eec3636aSJane Chu  * This is because under System V memory model, mappings created via
4684eec3636aSJane Chu  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
4685eec3636aSJane Chu  * their original vm_ops are overwritten with shm_vm_ops.
4686eec3636aSJane Chu  */
4687f0f37e2fSAlexey Dobriyan const struct vm_operations_struct hugetlb_vm_ops = {
4688d0217ac0SNick Piggin 	.fault = hugetlb_vm_op_fault,
468984afd99bSAndy Whitcroft 	.open = hugetlb_vm_op_open,
4690a1e78772SMel Gorman 	.close = hugetlb_vm_op_close,
4691dd3b614fSDmitry Safonov 	.may_split = hugetlb_vm_op_split,
469205ea8860SDan Williams 	.pagesize = hugetlb_vm_op_pagesize,
46931da177e4SLinus Torvalds };
46941da177e4SLinus Torvalds 
46951e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
46961e8f889bSDavid Gibson 				int writable)
469763551ae0SDavid Gibson {
469863551ae0SDavid Gibson 	pte_t entry;
469979c1c594SChristophe Leroy 	unsigned int shift = huge_page_shift(hstate_vma(vma));
470063551ae0SDavid Gibson 
47011e8f889bSDavid Gibson 	if (writable) {
4702106c992aSGerald Schaefer 		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
4703106c992aSGerald Schaefer 					 vma->vm_page_prot)));
470463551ae0SDavid Gibson 	} else {
4705106c992aSGerald Schaefer 		entry = huge_pte_wrprotect(mk_huge_pte(page,
4706106c992aSGerald Schaefer 					   vma->vm_page_prot));
470763551ae0SDavid Gibson 	}
470863551ae0SDavid Gibson 	entry = pte_mkyoung(entry);
470979c1c594SChristophe Leroy 	entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
471063551ae0SDavid Gibson 
471163551ae0SDavid Gibson 	return entry;
471263551ae0SDavid Gibson }
471363551ae0SDavid Gibson 
47141e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma,
47151e8f889bSDavid Gibson 				   unsigned long address, pte_t *ptep)
47161e8f889bSDavid Gibson {
47171e8f889bSDavid Gibson 	pte_t entry;
47181e8f889bSDavid Gibson 
4719106c992aSGerald Schaefer 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
472032f84528SChris Forbes 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
47214b3073e1SRussell King 		update_mmu_cache(vma, address, ptep);
47221e8f889bSDavid Gibson }
47231e8f889bSDavid Gibson 
4724d5ed7444SAneesh Kumar K.V bool is_hugetlb_entry_migration(pte_t pte)
47254a705fefSNaoya Horiguchi {
47264a705fefSNaoya Horiguchi 	swp_entry_t swp;
47274a705fefSNaoya Horiguchi 
47284a705fefSNaoya Horiguchi 	if (huge_pte_none(pte) || pte_present(pte))
4729d5ed7444SAneesh Kumar K.V 		return false;
47304a705fefSNaoya Horiguchi 	swp = pte_to_swp_entry(pte);
4731d79d176aSBaoquan He 	if (is_migration_entry(swp))
4732d5ed7444SAneesh Kumar K.V 		return true;
47334a705fefSNaoya Horiguchi 	else
4734d5ed7444SAneesh Kumar K.V 		return false;
47354a705fefSNaoya Horiguchi }
47364a705fefSNaoya Horiguchi 
47373e5c3600SBaoquan He static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
47384a705fefSNaoya Horiguchi {
47394a705fefSNaoya Horiguchi 	swp_entry_t swp;
47404a705fefSNaoya Horiguchi 
47414a705fefSNaoya Horiguchi 	if (huge_pte_none(pte) || pte_present(pte))
47423e5c3600SBaoquan He 		return false;
47434a705fefSNaoya Horiguchi 	swp = pte_to_swp_entry(pte);
4744d79d176aSBaoquan He 	if (is_hwpoison_entry(swp))
47453e5c3600SBaoquan He 		return true;
47464a705fefSNaoya Horiguchi 	else
47473e5c3600SBaoquan He 		return false;
47484a705fefSNaoya Horiguchi }
47491e8f889bSDavid Gibson 
47504eae4efaSPeter Xu static void
47514eae4efaSPeter Xu hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
47524eae4efaSPeter Xu 		     struct page *new_page)
47534eae4efaSPeter Xu {
47544eae4efaSPeter Xu 	__SetPageUptodate(new_page);
47554eae4efaSPeter Xu 	hugepage_add_new_anon_rmap(new_page, vma, addr);
47561eba86c0SPasha Tatashin 	set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
47574eae4efaSPeter Xu 	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
47584eae4efaSPeter Xu 	ClearHPageRestoreReserve(new_page);
47594eae4efaSPeter Xu 	SetHPageMigratable(new_page);
47604eae4efaSPeter Xu }
47614eae4efaSPeter Xu 
476263551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
4763bc70fbf2SPeter Xu 			    struct vm_area_struct *dst_vma,
4764bc70fbf2SPeter Xu 			    struct vm_area_struct *src_vma)
476563551ae0SDavid Gibson {
47663aa4ed80SMiaohe Lin 	pte_t *src_pte, *dst_pte, entry;
476763551ae0SDavid Gibson 	struct page *ptepage;
47681c59827dSHugh Dickins 	unsigned long addr;
4769bc70fbf2SPeter Xu 	bool cow = is_cow_mapping(src_vma->vm_flags);
4770bc70fbf2SPeter Xu 	struct hstate *h = hstate_vma(src_vma);
4771a5516438SAndi Kleen 	unsigned long sz = huge_page_size(h);
47724eae4efaSPeter Xu 	unsigned long npages = pages_per_huge_page(h);
4773ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
4774e95a9851SMike Kravetz 	unsigned long last_addr_mask;
4775e8569dd2SAndreas Sandberg 	int ret = 0;
47761e8f889bSDavid Gibson 
4777ac46d4f3SJérôme Glisse 	if (cow) {
4778bc70fbf2SPeter Xu 		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_vma, src,
4779bc70fbf2SPeter Xu 					src_vma->vm_start,
4780bc70fbf2SPeter Xu 					src_vma->vm_end);
4781ac46d4f3SJérôme Glisse 		mmu_notifier_invalidate_range_start(&range);
4782623a1ddfSDavid Hildenbrand 		mmap_assert_write_locked(src);
4783623a1ddfSDavid Hildenbrand 		raw_write_seqcount_begin(&src->write_protect_seq);
4784ac46d4f3SJérôme Glisse 	}
4785e8569dd2SAndreas Sandberg 
4786e95a9851SMike Kravetz 	last_addr_mask = hugetlb_mask_last_page(h);
4787bc70fbf2SPeter Xu 	for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
4788cb900f41SKirill A. Shutemov 		spinlock_t *src_ptl, *dst_ptl;
47897868a208SPunit Agrawal 		src_pte = huge_pte_offset(src, addr, sz);
4790e95a9851SMike Kravetz 		if (!src_pte) {
4791e95a9851SMike Kravetz 			addr |= last_addr_mask;
4792c74df32cSHugh Dickins 			continue;
4793e95a9851SMike Kravetz 		}
4794bc70fbf2SPeter Xu 		dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
4795e8569dd2SAndreas Sandberg 		if (!dst_pte) {
4796e8569dd2SAndreas Sandberg 			ret = -ENOMEM;
4797e8569dd2SAndreas Sandberg 			break;
4798e8569dd2SAndreas Sandberg 		}
4799c5c99429SLarry Woodman 
48005e41540cSMike Kravetz 		/*
48015e41540cSMike Kravetz 		 * If the pagetables are shared don't copy or take references.
48025e41540cSMike Kravetz 		 * If the pagetables are shared, don't copy or take references.
48033aa4ed80SMiaohe Lin 		 * dst_pte == src_pte is the common case of src/dest sharing.
48045e41540cSMike Kravetz 		 * However, src could have 'unshared' and dst shares with
48063aa4ed80SMiaohe Lin 		 * another vma. So the page_count of the ptep page is checked
48073aa4ed80SMiaohe Lin 		 * instead to reliably determine whether the pte is shared.
48075e41540cSMike Kravetz 		 */
48083aa4ed80SMiaohe Lin 		if (page_count(virt_to_page(dst_pte)) > 1) {
4809e95a9851SMike Kravetz 			addr |= last_addr_mask;
4810c5c99429SLarry Woodman 			continue;
4811e95a9851SMike Kravetz 		}
4812c5c99429SLarry Woodman 
4813cb900f41SKirill A. Shutemov 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
4814cb900f41SKirill A. Shutemov 		src_ptl = huge_pte_lockptr(h, src, src_pte);
4815cb900f41SKirill A. Shutemov 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
48164a705fefSNaoya Horiguchi 		entry = huge_ptep_get(src_pte);
48174eae4efaSPeter Xu again:
48183aa4ed80SMiaohe Lin 		if (huge_pte_none(entry)) {
48195e41540cSMike Kravetz 			/*
48203aa4ed80SMiaohe Lin 			 * Skip if src entry none.
48215e41540cSMike Kravetz 			 */
48224a705fefSNaoya Horiguchi 			;
4823c2cb0dccSNaoya Horiguchi 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
4824c2cb0dccSNaoya Horiguchi 			bool uffd_wp = huge_pte_uffd_wp(entry);
4825c2cb0dccSNaoya Horiguchi 
4826c2cb0dccSNaoya Horiguchi 			if (!userfaultfd_wp(dst_vma) && uffd_wp)
4827c2cb0dccSNaoya Horiguchi 				entry = huge_pte_clear_uffd_wp(entry);
4828c2cb0dccSNaoya Horiguchi 			set_huge_pte_at(dst, addr, dst_pte, entry);
4829c2cb0dccSNaoya Horiguchi 		} else if (unlikely(is_hugetlb_entry_migration(entry))) {
48304a705fefSNaoya Horiguchi 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
4831bc70fbf2SPeter Xu 			bool uffd_wp = huge_pte_uffd_wp(entry);
48324a705fefSNaoya Horiguchi 
48336c287605SDavid Hildenbrand 			if (!is_readable_migration_entry(swp_entry) && cow) {
48344a705fefSNaoya Horiguchi 				/*
48354a705fefSNaoya Horiguchi 				 * COW mappings require pages in both
48364a705fefSNaoya Horiguchi 				 * parent and child to be marked read-only.
48374a705fefSNaoya Horiguchi 				 */
48384dd845b5SAlistair Popple 				swp_entry = make_readable_migration_entry(
48394dd845b5SAlistair Popple 							swp_offset(swp_entry));
48404a705fefSNaoya Horiguchi 				entry = swp_entry_to_pte(swp_entry);
4841bc70fbf2SPeter Xu 				if (userfaultfd_wp(src_vma) && uffd_wp)
4842bc70fbf2SPeter Xu 					entry = huge_pte_mkuffd_wp(entry);
484318f39629SQi Zheng 				set_huge_pte_at(src, addr, src_pte, entry);
48444a705fefSNaoya Horiguchi 			}
4845bc70fbf2SPeter Xu 			if (!userfaultfd_wp(dst_vma) && uffd_wp)
4846bc70fbf2SPeter Xu 				entry = huge_pte_clear_uffd_wp(entry);
484718f39629SQi Zheng 			set_huge_pte_at(dst, addr, dst_pte, entry);
4848bc70fbf2SPeter Xu 		} else if (unlikely(is_pte_marker(entry))) {
4849bc70fbf2SPeter Xu 			/*
4850bc70fbf2SPeter Xu 			 * We copy the pte marker only if the dst vma has
4851bc70fbf2SPeter Xu 			 * uffd-wp enabled.
4852bc70fbf2SPeter Xu 			 */
4853bc70fbf2SPeter Xu 			if (userfaultfd_wp(dst_vma))
4854bc70fbf2SPeter Xu 				set_huge_pte_at(dst, addr, dst_pte, entry);
48554a705fefSNaoya Horiguchi 		} else {
48564eae4efaSPeter Xu 			entry = huge_ptep_get(src_pte);
48574eae4efaSPeter Xu 			ptepage = pte_page(entry);
48584eae4efaSPeter Xu 			get_page(ptepage);
48594eae4efaSPeter Xu 
48604eae4efaSPeter Xu 			/*
4861fb3d824dSDavid Hildenbrand 			 * Failing to duplicate the anon rmap is a rare case
4862fb3d824dSDavid Hildenbrand 			 * where we see pinned hugetlb pages while they're
4863fb3d824dSDavid Hildenbrand 			 * prone to COW. We need to do the COW earlier during
4864fb3d824dSDavid Hildenbrand 			 * fork.
48654eae4efaSPeter Xu 			 *
48664eae4efaSPeter Xu 			 * When pre-allocating the page or copying data, we
48674eae4efaSPeter Xu 			 * must not hold the pgtable locks since we could
48684eae4efaSPeter Xu 			 * sleep during the process.
48694eae4efaSPeter Xu 			 */
4870fb3d824dSDavid Hildenbrand 			if (!PageAnon(ptepage)) {
4871fb3d824dSDavid Hildenbrand 				page_dup_file_rmap(ptepage, true);
4872bc70fbf2SPeter Xu 			} else if (page_try_dup_anon_rmap(ptepage, true,
4873bc70fbf2SPeter Xu 							  src_vma)) {
48744eae4efaSPeter Xu 				pte_t src_pte_old = entry;
48754eae4efaSPeter Xu 				struct page *new;
48764eae4efaSPeter Xu 
48774eae4efaSPeter Xu 				spin_unlock(src_ptl);
48784eae4efaSPeter Xu 				spin_unlock(dst_ptl);
48794eae4efaSPeter Xu 				/* Do not use reserve as it's private owned */
4880bc70fbf2SPeter Xu 				new = alloc_huge_page(dst_vma, addr, 1);
48814eae4efaSPeter Xu 				if (IS_ERR(new)) {
48824eae4efaSPeter Xu 					put_page(ptepage);
48834eae4efaSPeter Xu 					ret = PTR_ERR(new);
48844eae4efaSPeter Xu 					break;
48854eae4efaSPeter Xu 				}
4886bc70fbf2SPeter Xu 				copy_user_huge_page(new, ptepage, addr, dst_vma,
48874eae4efaSPeter Xu 						    npages);
48884eae4efaSPeter Xu 				put_page(ptepage);
48894eae4efaSPeter Xu 
48904eae4efaSPeter Xu 				/* Install the new huge page if src pte stable */
48914eae4efaSPeter Xu 				dst_ptl = huge_pte_lock(h, dst, dst_pte);
48924eae4efaSPeter Xu 				src_ptl = huge_pte_lockptr(h, src, src_pte);
48934eae4efaSPeter Xu 				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
48944eae4efaSPeter Xu 				entry = huge_ptep_get(src_pte);
48954eae4efaSPeter Xu 				if (!pte_same(src_pte_old, entry)) {
4896bc70fbf2SPeter Xu 					restore_reserve_on_error(h, dst_vma, addr,
4897846be085SMike Kravetz 								new);
48984eae4efaSPeter Xu 					put_page(new);
48993aa4ed80SMiaohe Lin 					/* huge_ptep of dst_pte won't change as in child */
49004eae4efaSPeter Xu 					goto again;
49014eae4efaSPeter Xu 				}
4902bc70fbf2SPeter Xu 				hugetlb_install_page(dst_vma, dst_pte, addr, new);
49034eae4efaSPeter Xu 				spin_unlock(src_ptl);
49044eae4efaSPeter Xu 				spin_unlock(dst_ptl);
49054eae4efaSPeter Xu 				continue;
49064eae4efaSPeter Xu 			}
49074eae4efaSPeter Xu 
490834ee645eSJoerg Roedel 			if (cow) {
49090f10851eSJérôme Glisse 				/*
49100f10851eSJérôme Glisse 				 * No need to notify as we are downgrading page
49110f10851eSJérôme Glisse 				 * table protection not changing it to point
49120f10851eSJérôme Glisse 				 * to a new page.
49130f10851eSJérôme Glisse 				 *
4914ee65728eSMike Rapoport 				 * See Documentation/mm/mmu_notifier.rst
49150f10851eSJérôme Glisse 				 */
49167f2e9525SGerald Schaefer 				huge_ptep_set_wrprotect(src, addr, src_pte);
491784894e1cSPeter Xu 				entry = huge_pte_wrprotect(entry);
491834ee645eSJoerg Roedel 			}
49194eae4efaSPeter Xu 
492063551ae0SDavid Gibson 			set_huge_pte_at(dst, addr, dst_pte, entry);
49214eae4efaSPeter Xu 			hugetlb_count_add(npages, dst);
49221c59827dSHugh Dickins 		}
4923cb900f41SKirill A. Shutemov 		spin_unlock(src_ptl);
4924cb900f41SKirill A. Shutemov 		spin_unlock(dst_ptl);
492563551ae0SDavid Gibson 	}
492663551ae0SDavid Gibson 
4927623a1ddfSDavid Hildenbrand 	if (cow) {
4928623a1ddfSDavid Hildenbrand 		raw_write_seqcount_end(&src->write_protect_seq);
4929ac46d4f3SJérôme Glisse 		mmu_notifier_invalidate_range_end(&range);
4930623a1ddfSDavid Hildenbrand 	}
4931e8569dd2SAndreas Sandberg 
4932e8569dd2SAndreas Sandberg 	return ret;
493363551ae0SDavid Gibson }
493463551ae0SDavid Gibson 
4935550a7d60SMina Almasry static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
4936db110a99SAneesh Kumar K.V 			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
4937550a7d60SMina Almasry {
4938550a7d60SMina Almasry 	struct hstate *h = hstate_vma(vma);
4939550a7d60SMina Almasry 	struct mm_struct *mm = vma->vm_mm;
4940550a7d60SMina Almasry 	spinlock_t *src_ptl, *dst_ptl;
4941db110a99SAneesh Kumar K.V 	pte_t pte;
4942550a7d60SMina Almasry 
4943550a7d60SMina Almasry 	dst_ptl = huge_pte_lock(h, mm, dst_pte);
4944550a7d60SMina Almasry 	src_ptl = huge_pte_lockptr(h, mm, src_pte);
4945550a7d60SMina Almasry 
4946550a7d60SMina Almasry 	/*
4947550a7d60SMina Almasry 	 * We don't have to worry about the ordering of src and dst ptlocks
4948550a7d60SMina Almasry 	 * because exclusive mmap_sem (or the i_mmap_lock) prevents deadlock.
4949550a7d60SMina Almasry 	 */
4950550a7d60SMina Almasry 	if (src_ptl != dst_ptl)
4951550a7d60SMina Almasry 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4952550a7d60SMina Almasry 
4953550a7d60SMina Almasry 	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
4954550a7d60SMina Almasry 	set_huge_pte_at(mm, new_addr, dst_pte, pte);
4955550a7d60SMina Almasry 
4956550a7d60SMina Almasry 	if (src_ptl != dst_ptl)
4957550a7d60SMina Almasry 		spin_unlock(src_ptl);
4958550a7d60SMina Almasry 	spin_unlock(dst_ptl);
4959550a7d60SMina Almasry }
4960550a7d60SMina Almasry 
4961550a7d60SMina Almasry int move_hugetlb_page_tables(struct vm_area_struct *vma,
4962550a7d60SMina Almasry 			     struct vm_area_struct *new_vma,
4963550a7d60SMina Almasry 			     unsigned long old_addr, unsigned long new_addr,
4964550a7d60SMina Almasry 			     unsigned long len)
4965550a7d60SMina Almasry {
4966550a7d60SMina Almasry 	struct hstate *h = hstate_vma(vma);
4967550a7d60SMina Almasry 	struct address_space *mapping = vma->vm_file->f_mapping;
4968550a7d60SMina Almasry 	unsigned long sz = huge_page_size(h);
4969550a7d60SMina Almasry 	struct mm_struct *mm = vma->vm_mm;
4970550a7d60SMina Almasry 	unsigned long old_end = old_addr + len;
4971e95a9851SMike Kravetz 	unsigned long last_addr_mask;
4972550a7d60SMina Almasry 	pte_t *src_pte, *dst_pte;
4973550a7d60SMina Almasry 	struct mmu_notifier_range range;
49743d0b95cdSBaolin Wang 	bool shared_pmd = false;
4975550a7d60SMina Almasry 
4976550a7d60SMina Almasry 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, old_addr,
4977550a7d60SMina Almasry 				old_end);
4978550a7d60SMina Almasry 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
49793d0b95cdSBaolin Wang 	/*
49803d0b95cdSBaolin Wang 	 * In case of shared PMDs, we should cover the maximum possible
49813d0b95cdSBaolin Wang 	 * range.
49823d0b95cdSBaolin Wang 	 */
49833d0b95cdSBaolin Wang 	flush_cache_range(vma, range.start, range.end);
49843d0b95cdSBaolin Wang 
4985550a7d60SMina Almasry 	mmu_notifier_invalidate_range_start(&range);
4986e95a9851SMike Kravetz 	last_addr_mask = hugetlb_mask_last_page(h);
4987550a7d60SMina Almasry 	/* Prevent race with file truncation */
4988550a7d60SMina Almasry 	i_mmap_lock_write(mapping);
4989550a7d60SMina Almasry 	for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
4990550a7d60SMina Almasry 		src_pte = huge_pte_offset(mm, old_addr, sz);
4991e95a9851SMike Kravetz 		if (!src_pte) {
4992e95a9851SMike Kravetz 			old_addr |= last_addr_mask;
4993e95a9851SMike Kravetz 			new_addr |= last_addr_mask;
4994550a7d60SMina Almasry 			continue;
4995e95a9851SMike Kravetz 		}
4996550a7d60SMina Almasry 		if (huge_pte_none(huge_ptep_get(src_pte)))
4997550a7d60SMina Almasry 			continue;
4998550a7d60SMina Almasry 
49994ddb4d91SMike Kravetz 		if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
50003d0b95cdSBaolin Wang 			shared_pmd = true;
50014ddb4d91SMike Kravetz 			old_addr |= last_addr_mask;
50024ddb4d91SMike Kravetz 			new_addr |= last_addr_mask;
5003550a7d60SMina Almasry 			continue;
50043d0b95cdSBaolin Wang 		}
5005550a7d60SMina Almasry 
5006550a7d60SMina Almasry 		dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
5007550a7d60SMina Almasry 		if (!dst_pte)
5008550a7d60SMina Almasry 			break;
5009550a7d60SMina Almasry 
5010db110a99SAneesh Kumar K.V 		move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
5011550a7d60SMina Almasry 	}
50123d0b95cdSBaolin Wang 
50133d0b95cdSBaolin Wang 	if (shared_pmd)
50143d0b95cdSBaolin Wang 		flush_tlb_range(vma, range.start, range.end);
50153d0b95cdSBaolin Wang 	else
5016550a7d60SMina Almasry 		flush_tlb_range(vma, old_end - len, old_end);
5017550a7d60SMina Almasry 	mmu_notifier_invalidate_range_end(&range);
501813e4ad2cSNadav Amit 	i_mmap_unlock_write(mapping);
5019550a7d60SMina Almasry 
5020550a7d60SMina Almasry 	return len + old_addr - old_end;
5021550a7d60SMina Almasry }
5022550a7d60SMina Almasry 
502373c54763SPeter Xu static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
502424669e58SAneesh Kumar K.V 				   unsigned long start, unsigned long end,
502505e90bd0SPeter Xu 				   struct page *ref_page, zap_flags_t zap_flags)
502663551ae0SDavid Gibson {
502763551ae0SDavid Gibson 	struct mm_struct *mm = vma->vm_mm;
502863551ae0SDavid Gibson 	unsigned long address;
5029c7546f8fSDavid Gibson 	pte_t *ptep;
503063551ae0SDavid Gibson 	pte_t pte;
5031cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
503263551ae0SDavid Gibson 	struct page *page;
5033a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
5034a5516438SAndi Kleen 	unsigned long sz = huge_page_size(h);
5035ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
5036e95a9851SMike Kravetz 	unsigned long last_addr_mask;
5037a4a118f2SNadav Amit 	bool force_flush = false;
5038a5516438SAndi Kleen 
503963551ae0SDavid Gibson 	WARN_ON(!is_vm_hugetlb_page(vma));
5040a5516438SAndi Kleen 	BUG_ON(start & ~huge_page_mask(h));
5041a5516438SAndi Kleen 	BUG_ON(end & ~huge_page_mask(h));
504263551ae0SDavid Gibson 
504307e32661SAneesh Kumar K.V 	/*
504407e32661SAneesh Kumar K.V 	 * This is a hugetlb vma; all the pte entries should point
504507e32661SAneesh Kumar K.V 	 * to huge pages.
504607e32661SAneesh Kumar K.V 	 */
5047ed6a7935SPeter Zijlstra 	tlb_change_page_size(tlb, sz);
504824669e58SAneesh Kumar K.V 	tlb_start_vma(tlb, vma);
5049dff11abeSMike Kravetz 
5050dff11abeSMike Kravetz 	/*
5051dff11abeSMike Kravetz 	 * If sharing possible, alert mmu notifiers of worst case.
5052dff11abeSMike Kravetz 	 */
50536f4f13e8SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
50546f4f13e8SJérôme Glisse 				end);
5055ac46d4f3SJérôme Glisse 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5056ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
5057e95a9851SMike Kravetz 	last_addr_mask = hugetlb_mask_last_page(h);
5058569f48b8SHillf Danton 	address = start;
5059569f48b8SHillf Danton 	for (; address < end; address += sz) {
50607868a208SPunit Agrawal 		ptep = huge_pte_offset(mm, address, sz);
5061e95a9851SMike Kravetz 		if (!ptep) {
5062e95a9851SMike Kravetz 			address |= last_addr_mask;
5063c7546f8fSDavid Gibson 			continue;
5064e95a9851SMike Kravetz 		}
5065c7546f8fSDavid Gibson 
5066cb900f41SKirill A. Shutemov 		ptl = huge_pte_lock(h, mm, ptep);
50674ddb4d91SMike Kravetz 		if (huge_pmd_unshare(mm, vma, address, ptep)) {
506831d49da5SAneesh Kumar K.V 			spin_unlock(ptl);
5069a4a118f2SNadav Amit 			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
5070a4a118f2SNadav Amit 			force_flush = true;
50714ddb4d91SMike Kravetz 			address |= last_addr_mask;
507231d49da5SAneesh Kumar K.V 			continue;
507331d49da5SAneesh Kumar K.V 		}
507439dde65cSChen, Kenneth W 
50756629326bSHillf Danton 		pte = huge_ptep_get(ptep);
507631d49da5SAneesh Kumar K.V 		if (huge_pte_none(pte)) {
507731d49da5SAneesh Kumar K.V 			spin_unlock(ptl);
507831d49da5SAneesh Kumar K.V 			continue;
507931d49da5SAneesh Kumar K.V 		}
50806629326bSHillf Danton 
50816629326bSHillf Danton 		/*
50829fbc1f63SNaoya Horiguchi 		 * A migrating or HWPoisoned hugepage is already
50839fbc1f63SNaoya Horiguchi 		 * unmapped and its refcount dropped, so just clear the pte here.
50846629326bSHillf Danton 		 */
50859fbc1f63SNaoya Horiguchi 		if (unlikely(!pte_present(pte))) {
508605e90bd0SPeter Xu 			/*
508705e90bd0SPeter Xu 			 * If the pte was wr-protected by uffd-wp in any of the
508805e90bd0SPeter Xu 			 * swap forms, meanwhile the caller does not want to
508905e90bd0SPeter Xu 			 * drop the uffd-wp bit in this zap, then replace the
509005e90bd0SPeter Xu 			 * pte with a marker.
509105e90bd0SPeter Xu 			 */
509205e90bd0SPeter Xu 			if (pte_swp_uffd_wp_any(pte) &&
509305e90bd0SPeter Xu 			    !(zap_flags & ZAP_FLAG_DROP_MARKER))
509405e90bd0SPeter Xu 				set_huge_pte_at(mm, address, ptep,
509505e90bd0SPeter Xu 						make_pte_marker(PTE_MARKER_UFFD_WP));
509605e90bd0SPeter Xu 			else
50979386fac3SPunit Agrawal 				huge_pte_clear(mm, address, ptep, sz);
509831d49da5SAneesh Kumar K.V 			spin_unlock(ptl);
509931d49da5SAneesh Kumar K.V 			continue;
51008c4894c6SNaoya Horiguchi 		}
51016629326bSHillf Danton 
51026629326bSHillf Danton 		page = pte_page(pte);
510304f2cbe3SMel Gorman 		/*
510404f2cbe3SMel Gorman 		 * If a reference page is supplied, it is because a specific
510504f2cbe3SMel Gorman 		 * page is being unmapped, not a range. Ensure the page we
510604f2cbe3SMel Gorman 		 * are about to unmap is the actual page of interest.
510704f2cbe3SMel Gorman 		 */
510804f2cbe3SMel Gorman 		if (ref_page) {
510931d49da5SAneesh Kumar K.V 			if (page != ref_page) {
511031d49da5SAneesh Kumar K.V 				spin_unlock(ptl);
511131d49da5SAneesh Kumar K.V 				continue;
511231d49da5SAneesh Kumar K.V 			}
511304f2cbe3SMel Gorman 			/*
511404f2cbe3SMel Gorman 			 * Mark the VMA as having unmapped its page so that
511504f2cbe3SMel Gorman 			 * future faults in this VMA will fail rather than
511604f2cbe3SMel Gorman 			 * looking like data was lost
511704f2cbe3SMel Gorman 			 */
511804f2cbe3SMel Gorman 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
511904f2cbe3SMel Gorman 		}
512004f2cbe3SMel Gorman 
5121c7546f8fSDavid Gibson 		pte = huge_ptep_get_and_clear(mm, address, ptep);
5122b528e4b6SAneesh Kumar K.V 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5123106c992aSGerald Schaefer 		if (huge_pte_dirty(pte))
51246649a386SKen Chen 			set_page_dirty(page);
512505e90bd0SPeter Xu 		/* Leave a uffd-wp pte marker if needed */
512605e90bd0SPeter Xu 		if (huge_pte_uffd_wp(pte) &&
512705e90bd0SPeter Xu 		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
512805e90bd0SPeter Xu 			set_huge_pte_at(mm, address, ptep,
512905e90bd0SPeter Xu 					make_pte_marker(PTE_MARKER_UFFD_WP));
51305d317b2bSNaoya Horiguchi 		hugetlb_count_sub(pages_per_huge_page(h), mm);
5131cea86fe2SHugh Dickins 		page_remove_rmap(page, vma, true);
513231d49da5SAneesh Kumar K.V 
5133cb900f41SKirill A. Shutemov 		spin_unlock(ptl);
5134e77b0852SAneesh Kumar K.V 		tlb_remove_page_size(tlb, page, huge_page_size(h));
513524669e58SAneesh Kumar K.V 		/*
513631d49da5SAneesh Kumar K.V 		 * Bail out after unmapping reference page if supplied
513724669e58SAneesh Kumar K.V 		 */
513831d49da5SAneesh Kumar K.V 		if (ref_page)
513931d49da5SAneesh Kumar K.V 			break;
5140fe1668aeSChen, Kenneth W 	}
5141ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
514224669e58SAneesh Kumar K.V 	tlb_end_vma(tlb, vma);
5143a4a118f2SNadav Amit 
5144a4a118f2SNadav Amit 	/*
5145a4a118f2SNadav Amit 	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5146a4a118f2SNadav Amit 	 * could defer the flush until now, since by holding i_mmap_rwsem we
5147a4a118f2SNadav Amit 	 * guaranteed that the last reference would not be dropped. But we must
5148a4a118f2SNadav Amit 	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
5149a4a118f2SNadav Amit 	 * dropped and the last reference to the shared PMDs page might be
5150a4a118f2SNadav Amit 	 * dropped as well.
5151a4a118f2SNadav Amit 	 *
5152a4a118f2SNadav Amit 	 * In theory we could defer the freeing of the PMD pages as well, but
5153a4a118f2SNadav Amit 	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5154a4a118f2SNadav Amit 	 * detect sharing, so we cannot defer the release of the page either.
5155a4a118f2SNadav Amit 	 * Instead, do flush now.
5156a4a118f2SNadav Amit 	 */
5157a4a118f2SNadav Amit 	if (force_flush)
5158a4a118f2SNadav Amit 		tlb_flush_mmu_tlbonly(tlb);
51591da177e4SLinus Torvalds }
516063551ae0SDavid Gibson 
5161d833352aSMel Gorman void __unmap_hugepage_range_final(struct mmu_gather *tlb,
5162d833352aSMel Gorman 			  struct vm_area_struct *vma, unsigned long start,
516305e90bd0SPeter Xu 			  unsigned long end, struct page *ref_page,
516405e90bd0SPeter Xu 			  zap_flags_t zap_flags)
5165d833352aSMel Gorman {
516605e90bd0SPeter Xu 	__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
5167d833352aSMel Gorman 
5168d833352aSMel Gorman 	/*
5169d833352aSMel Gorman 	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
5170d833352aSMel Gorman 	 * test will fail on a vma being torn down, and not grab a page table
5171d833352aSMel Gorman 	 * on its way out.  We're lucky that the flag has such an appropriate
5172d833352aSMel Gorman 	 * name, and can in fact be safely cleared here. We could clear it
5173d833352aSMel Gorman 	 * before the __unmap_hugepage_range above, but all that's necessary
5174c8c06efaSDavidlohr Bueso 	 * is to clear it before releasing the i_mmap_rwsem. This works
5175d833352aSMel Gorman 	 * because in the context this is called, the VMA is about to be
5176c8c06efaSDavidlohr Bueso 	 * destroyed and the i_mmap_rwsem is held.
5177d833352aSMel Gorman 	 */
5178d833352aSMel Gorman 	vma->vm_flags &= ~VM_MAYSHARE;
5179d833352aSMel Gorman }
5180d833352aSMel Gorman 
5181502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
518205e90bd0SPeter Xu 			  unsigned long end, struct page *ref_page,
518305e90bd0SPeter Xu 			  zap_flags_t zap_flags)
5184502717f4SChen, Kenneth W {
518524669e58SAneesh Kumar K.V 	struct mmu_gather tlb;
5186dff11abeSMike Kravetz 
5187a72afd87SWill Deacon 	tlb_gather_mmu(&tlb, vma->vm_mm);
518805e90bd0SPeter Xu 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
5189ae8eba8bSWill Deacon 	tlb_finish_mmu(&tlb);
5190502717f4SChen, Kenneth W }
5191502717f4SChen, Kenneth W 
519204f2cbe3SMel Gorman /*
519304f2cbe3SMel Gorman  * This is called when the original mapper is failing to COW a MAP_PRIVATE
5194578b7725SZhiyuan Dai  * mapping it owns the reserve page for. The intention is to unmap the page
519504f2cbe3SMel Gorman  * from other VMAs and let the children be SIGKILLed if they are faulting the
519604f2cbe3SMel Gorman  * same region.
519704f2cbe3SMel Gorman  */
51982f4612afSDavidlohr Bueso static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
51992a4b3dedSHarvey Harrison 			      struct page *page, unsigned long address)
520004f2cbe3SMel Gorman {
52017526674dSAdam Litke 	struct hstate *h = hstate_vma(vma);
520204f2cbe3SMel Gorman 	struct vm_area_struct *iter_vma;
520304f2cbe3SMel Gorman 	struct address_space *mapping;
520404f2cbe3SMel Gorman 	pgoff_t pgoff;
520504f2cbe3SMel Gorman 
520604f2cbe3SMel Gorman 	/*
520704f2cbe3SMel Gorman 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
520804f2cbe3SMel Gorman 	 * from page cache lookup which is in HPAGE_SIZE units.
520904f2cbe3SMel Gorman 	 */
52107526674dSAdam Litke 	address = address & huge_page_mask(h);
521136e4f20aSMichal Hocko 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
521236e4f20aSMichal Hocko 			vma->vm_pgoff;
521393c76a3dSAl Viro 	mapping = vma->vm_file->f_mapping;
521404f2cbe3SMel Gorman 
52154eb2b1dcSMel Gorman 	/*
52164eb2b1dcSMel Gorman 	 * Take the mapping lock for the duration of the table walk. As
52174eb2b1dcSMel Gorman 	 * this mapping should be shared between all the VMAs,
52184eb2b1dcSMel Gorman 	 * __unmap_hugepage_range() is called as the lock is already held
52194eb2b1dcSMel Gorman 	 */
522083cde9e8SDavidlohr Bueso 	i_mmap_lock_write(mapping);
52216b2dbba8SMichel Lespinasse 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
522204f2cbe3SMel Gorman 		/* Do not unmap the current VMA */
522304f2cbe3SMel Gorman 		if (iter_vma == vma)
522404f2cbe3SMel Gorman 			continue;
522504f2cbe3SMel Gorman 
522604f2cbe3SMel Gorman 		/*
52272f84a899SMel Gorman 		 * Shared VMAs have their own reserves and do not affect
52282f84a899SMel Gorman 		 * MAP_PRIVATE accounting but it is possible that a shared
52292f84a899SMel Gorman 		 * VMA is using the same page so check and skip such VMAs.
52302f84a899SMel Gorman 		 */
52312f84a899SMel Gorman 		if (iter_vma->vm_flags & VM_MAYSHARE)
52322f84a899SMel Gorman 			continue;
52332f84a899SMel Gorman 
52342f84a899SMel Gorman 		/*
523504f2cbe3SMel Gorman 		 * Unmap the page from other VMAs without their own reserves.
523604f2cbe3SMel Gorman 		 * They get marked to be SIGKILLed if they fault in these
523704f2cbe3SMel Gorman 		 * areas. This is because a future no-page fault on this VMA
523804f2cbe3SMel Gorman 		 * could insert a zeroed page instead of the data existing
523904f2cbe3SMel Gorman 		 * from the time of fork. This would look like data corruption
524004f2cbe3SMel Gorman 		 */
524104f2cbe3SMel Gorman 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
524224669e58SAneesh Kumar K.V 			unmap_hugepage_range(iter_vma, address,
524305e90bd0SPeter Xu 					     address + huge_page_size(h), page, 0);
524404f2cbe3SMel Gorman 	}
524583cde9e8SDavidlohr Bueso 	i_mmap_unlock_write(mapping);
524604f2cbe3SMel Gorman }
524704f2cbe3SMel Gorman 
52480fe6e20bSNaoya Horiguchi /*
5249c89357e2SDavid Hildenbrand  * hugetlb_wp() should be called with page lock of the original hugepage held.
5250aa6d2e8cSBaolin Wang  * Called with hugetlb_fault_mutex_table held and pte_page locked so we
5251ef009b25SMichal Hocko  * cannot race with other handlers or page migration.
5252ef009b25SMichal Hocko  * Keep the pte_same checks anyway to make transition from the mutex easier.
52530fe6e20bSNaoya Horiguchi  */
5254c89357e2SDavid Hildenbrand static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
5255c89357e2SDavid Hildenbrand 		       unsigned long address, pte_t *ptep, unsigned int flags,
5256cb900f41SKirill A. Shutemov 		       struct page *pagecache_page, spinlock_t *ptl)
52571e8f889bSDavid Gibson {
5258c89357e2SDavid Hildenbrand 	const bool unshare = flags & FAULT_FLAG_UNSHARE;
52593999f52eSAneesh Kumar K.V 	pte_t pte;
5260a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
52611e8f889bSDavid Gibson 	struct page *old_page, *new_page;
52622b740303SSouptick Joarder 	int outside_reserve = 0;
52632b740303SSouptick Joarder 	vm_fault_t ret = 0;
5264974e6d66SHuang Ying 	unsigned long haddr = address & huge_page_mask(h);
5265ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
52661e8f889bSDavid Gibson 
5267c89357e2SDavid Hildenbrand 	VM_BUG_ON(unshare && (flags & FOLL_WRITE));
5268c89357e2SDavid Hildenbrand 	VM_BUG_ON(!unshare && !(flags & FOLL_WRITE));
5269c89357e2SDavid Hildenbrand 
52701d8d1464SDavid Hildenbrand 	/*
52711d8d1464SDavid Hildenbrand 	 * hugetlb does not support FOLL_FORCE-style write faults that keep the
52721d8d1464SDavid Hildenbrand 	 * PTE mapped R/O such as maybe_mkwrite() would do.
52731d8d1464SDavid Hildenbrand 	 */
52741d8d1464SDavid Hildenbrand 	if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
52751d8d1464SDavid Hildenbrand 		return VM_FAULT_SIGSEGV;
52761d8d1464SDavid Hildenbrand 
52771d8d1464SDavid Hildenbrand 	/* Let's take out MAP_SHARED mappings first. */
52781d8d1464SDavid Hildenbrand 	if (vma->vm_flags & VM_MAYSHARE) {
52791d8d1464SDavid Hildenbrand 		if (unlikely(unshare))
52801d8d1464SDavid Hildenbrand 			return 0;
52811d8d1464SDavid Hildenbrand 		set_huge_ptep_writable(vma, haddr, ptep);
52821d8d1464SDavid Hildenbrand 		return 0;
52831d8d1464SDavid Hildenbrand 	}
52841d8d1464SDavid Hildenbrand 
52853999f52eSAneesh Kumar K.V 	pte = huge_ptep_get(ptep);
52861e8f889bSDavid Gibson 	old_page = pte_page(pte);
52871e8f889bSDavid Gibson 
5288662ce1dcSYang Yang 	delayacct_wpcopy_start();
5289662ce1dcSYang Yang 
529004f2cbe3SMel Gorman retry_avoidcopy:
5291c89357e2SDavid Hildenbrand 	/*
5292c89357e2SDavid Hildenbrand 	 * If no-one else is actually using this page, we're the exclusive
5293c89357e2SDavid Hildenbrand 	 * owner and can reuse this page.
5294c89357e2SDavid Hildenbrand 	 */
529537a2140dSJoonsoo Kim 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
5296c89357e2SDavid Hildenbrand 		if (!PageAnonExclusive(old_page))
52975a49973dSHugh Dickins 			page_move_anon_rmap(old_page, vma);
5298c89357e2SDavid Hildenbrand 		if (likely(!unshare))
52995b7a1d40SHuang Ying 			set_huge_ptep_writable(vma, haddr, ptep);
5300662ce1dcSYang Yang 
5301662ce1dcSYang Yang 		delayacct_wpcopy_end();
530283c54070SNick Piggin 		return 0;
53031e8f889bSDavid Gibson 	}
53046c287605SDavid Hildenbrand 	VM_BUG_ON_PAGE(PageAnon(old_page) && PageAnonExclusive(old_page),
53056c287605SDavid Hildenbrand 		       old_page);
53061e8f889bSDavid Gibson 
530704f2cbe3SMel Gorman 	/*
530804f2cbe3SMel Gorman 	 * If the process that created a MAP_PRIVATE mapping is about to
530904f2cbe3SMel Gorman 	 * perform a COW due to a shared page count, attempt to satisfy
531004f2cbe3SMel Gorman 	 * the allocation without using the existing reserves. The pagecache
531104f2cbe3SMel Gorman 	 * page is used to determine if the reserve at this address was
531204f2cbe3SMel Gorman 	 * consumed or not. If reserves were used, a partially faulted mapping
531304f2cbe3SMel Gorman 	 * at the time of fork() could consume its reserves on COW instead
531404f2cbe3SMel Gorman 	 * of the full address range.
531504f2cbe3SMel Gorman 	 */
53165944d011SJoonsoo Kim 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
531704f2cbe3SMel Gorman 			old_page != pagecache_page)
531804f2cbe3SMel Gorman 		outside_reserve = 1;
531904f2cbe3SMel Gorman 
532009cbfeafSKirill A. Shutemov 	get_page(old_page);
5321b76c8cfbSLarry Woodman 
5322ad4404a2SDavidlohr Bueso 	/*
5323ad4404a2SDavidlohr Bueso 	 * Drop page table lock as buddy allocator may be called. It will
5324ad4404a2SDavidlohr Bueso 	 * be acquired again before returning to the caller, as expected.
5325ad4404a2SDavidlohr Bueso 	 */
5326cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
53275b7a1d40SHuang Ying 	new_page = alloc_huge_page(vma, haddr, outside_reserve);
53281e8f889bSDavid Gibson 
53292fc39cecSAdam Litke 	if (IS_ERR(new_page)) {
533004f2cbe3SMel Gorman 		/*
533104f2cbe3SMel Gorman 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
533204f2cbe3SMel Gorman 		 * it is due to references held by a child and an insufficient
533304f2cbe3SMel Gorman 		 * huge page pool. To guarantee the original mapper's
533404f2cbe3SMel Gorman 		 * reliability, unmap the page from child processes. The child
533504f2cbe3SMel Gorman 		 * may get SIGKILLed if it later faults.
533604f2cbe3SMel Gorman 		 */
533704f2cbe3SMel Gorman 		if (outside_reserve) {
533809cbfeafSKirill A. Shutemov 			put_page(old_page);
53395b7a1d40SHuang Ying 			unmap_ref_private(mm, vma, old_page, haddr);
5340cb900f41SKirill A. Shutemov 			spin_lock(ptl);
53415b7a1d40SHuang Ying 			ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5342a9af0c5dSNaoya Horiguchi 			if (likely(ptep &&
5343a9af0c5dSNaoya Horiguchi 				   pte_same(huge_ptep_get(ptep), pte)))
534404f2cbe3SMel Gorman 				goto retry_avoidcopy;
5345a734bcc8SHillf Danton 			/*
5346cb900f41SKirill A. Shutemov 			 * A race occurred while re-acquiring the page
5347cb900f41SKirill A. Shutemov 			 * table lock, and our job is done.
5348a734bcc8SHillf Danton 			 */
5349662ce1dcSYang Yang 			delayacct_wpcopy_end();
5350a734bcc8SHillf Danton 			return 0;
535104f2cbe3SMel Gorman 		}
535204f2cbe3SMel Gorman 
53532b740303SSouptick Joarder 		ret = vmf_error(PTR_ERR(new_page));
5354ad4404a2SDavidlohr Bueso 		goto out_release_old;
53551e8f889bSDavid Gibson 	}
53561e8f889bSDavid Gibson 
53570fe6e20bSNaoya Horiguchi 	/*
53580fe6e20bSNaoya Horiguchi 	 * When the original hugepage is a shared one, it does not have
53590fe6e20bSNaoya Horiguchi 	 * an anon_vma prepared.
53600fe6e20bSNaoya Horiguchi 	 */
536144e2aa93SDean Nelson 	if (unlikely(anon_vma_prepare(vma))) {
5362ad4404a2SDavidlohr Bueso 		ret = VM_FAULT_OOM;
5363ad4404a2SDavidlohr Bueso 		goto out_release_all;
536444e2aa93SDean Nelson 	}
53650fe6e20bSNaoya Horiguchi 
5366974e6d66SHuang Ying 	copy_user_huge_page(new_page, old_page, address, vma,
536747ad8475SAndrea Arcangeli 			    pages_per_huge_page(h));
53680ed361deSNick Piggin 	__SetPageUptodate(new_page);
53691e8f889bSDavid Gibson 
53707269f999SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
53716f4f13e8SJérôme Glisse 				haddr + huge_page_size(h));
5372ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
5373ad4404a2SDavidlohr Bueso 
5374b76c8cfbSLarry Woodman 	/*
5375cb900f41SKirill A. Shutemov 	 * Retake the page table lock to check for racing updates
5376b76c8cfbSLarry Woodman 	 * before the page tables are altered
5377b76c8cfbSLarry Woodman 	 */
5378cb900f41SKirill A. Shutemov 	spin_lock(ptl);
53795b7a1d40SHuang Ying 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5380a9af0c5dSNaoya Horiguchi 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
5381d6995da3SMike Kravetz 		ClearHPageRestoreReserve(new_page);
538207443a85SJoonsoo Kim 
5383c89357e2SDavid Hildenbrand 		/* Break COW or unshare */
53845b7a1d40SHuang Ying 		huge_ptep_clear_flush(vma, haddr, ptep);
5385ac46d4f3SJérôme Glisse 		mmu_notifier_invalidate_range(mm, range.start, range.end);
5386cea86fe2SHugh Dickins 		page_remove_rmap(old_page, vma, true);
53875b7a1d40SHuang Ying 		hugepage_add_new_anon_rmap(new_page, vma, haddr);
53881eba86c0SPasha Tatashin 		set_huge_pte_at(mm, haddr, ptep,
5389c89357e2SDavid Hildenbrand 				make_huge_pte(vma, new_page, !unshare));
53908f251a3dSMike Kravetz 		SetHPageMigratable(new_page);
53911e8f889bSDavid Gibson 		/* Make the old page be freed below */
53921e8f889bSDavid Gibson 		new_page = old_page;
53931e8f889bSDavid Gibson 	}
5394cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
5395ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
5396ad4404a2SDavidlohr Bueso out_release_all:
5397c89357e2SDavid Hildenbrand 	/*
5398c89357e2SDavid Hildenbrand 	 * No restore in case of successful pagetable update (Break COW or
5399c89357e2SDavid Hildenbrand 	 * unshare)
5400c89357e2SDavid Hildenbrand 	 */
5401c7b1850dSMike Kravetz 	if (new_page != old_page)
54025b7a1d40SHuang Ying 		restore_reserve_on_error(h, vma, haddr, new_page);
540309cbfeafSKirill A. Shutemov 	put_page(new_page);
5404ad4404a2SDavidlohr Bueso out_release_old:
540509cbfeafSKirill A. Shutemov 	put_page(old_page);
54068312034fSJoonsoo Kim 
5407ad4404a2SDavidlohr Bueso 	spin_lock(ptl); /* Caller expects lock to be held */
5408662ce1dcSYang Yang 
5409662ce1dcSYang Yang 	delayacct_wpcopy_end();
5410ad4404a2SDavidlohr Bueso 	return ret;
54111e8f889bSDavid Gibson }
54121e8f889bSDavid Gibson 
54133ae77f43SHugh Dickins /*
54143ae77f43SHugh Dickins  * Return whether there is a pagecache page to back given address within VMA.
54153ae77f43SHugh Dickins  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
54163ae77f43SHugh Dickins  */
54173ae77f43SHugh Dickins static bool hugetlbfs_pagecache_present(struct hstate *h,
54182a15efc9SHugh Dickins 			struct vm_area_struct *vma, unsigned long address)
54192a15efc9SHugh Dickins {
54202a15efc9SHugh Dickins 	struct address_space *mapping;
54212a15efc9SHugh Dickins 	pgoff_t idx;
54222a15efc9SHugh Dickins 	struct page *page;
54232a15efc9SHugh Dickins 
54242a15efc9SHugh Dickins 	mapping = vma->vm_file->f_mapping;
54252a15efc9SHugh Dickins 	idx = vma_hugecache_offset(h, vma, address);
54262a15efc9SHugh Dickins 
54272a15efc9SHugh Dickins 	page = find_get_page(mapping, idx);
54282a15efc9SHugh Dickins 	if (page)
54292a15efc9SHugh Dickins 		put_page(page);
54302a15efc9SHugh Dickins 	return page != NULL;
54312a15efc9SHugh Dickins }
54322a15efc9SHugh Dickins 
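/*
 * Add a freshly allocated hugetlb page to the page cache at index 'idx'
 * and charge its blocks to the inode.  Callers are expected to hold the
 * hugetlb fault mutex for this (mapping, index), which serializes this
 * against remove_inode_hugepages().
 */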
5433ab76ad54SMike Kravetz int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
5434ab76ad54SMike Kravetz 			   pgoff_t idx)
5435ab76ad54SMike Kravetz {
5436d9ef44deSMatthew Wilcox (Oracle) 	struct folio *folio = page_folio(page);
5437ab76ad54SMike Kravetz 	struct inode *inode = mapping->host;
5438ab76ad54SMike Kravetz 	struct hstate *h = hstate_inode(inode);
5439d9ef44deSMatthew Wilcox (Oracle) 	int err;
5440ab76ad54SMike Kravetz 
5441d9ef44deSMatthew Wilcox (Oracle) 	__folio_set_locked(folio);
5442d9ef44deSMatthew Wilcox (Oracle) 	err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
5443d9ef44deSMatthew Wilcox (Oracle) 
5444d9ef44deSMatthew Wilcox (Oracle) 	if (unlikely(err)) {
5445d9ef44deSMatthew Wilcox (Oracle) 		__folio_clear_locked(folio);
5446ab76ad54SMike Kravetz 		return err;
5447d9ef44deSMatthew Wilcox (Oracle) 	}
5448d6995da3SMike Kravetz 	ClearHPageRestoreReserve(page);
5449ab76ad54SMike Kravetz 
545022146c3cSMike Kravetz 	/*
5451d9ef44deSMatthew Wilcox (Oracle) 	 * mark folio dirty so that it will not be removed from cache/file
545222146c3cSMike Kravetz 	 * by non-hugetlbfs specific code paths.
545322146c3cSMike Kravetz 	 */
5454d9ef44deSMatthew Wilcox (Oracle) 	folio_mark_dirty(folio);
545522146c3cSMike Kravetz 
5456ab76ad54SMike Kravetz 	spin_lock(&inode->i_lock);
5457ab76ad54SMike Kravetz 	inode->i_blocks += blocks_per_huge_page(h);
5458ab76ad54SMike Kravetz 	spin_unlock(&inode->i_lock);
5459ab76ad54SMike Kravetz 	return 0;
5460ab76ad54SMike Kravetz }
5461ab76ad54SMike Kravetz 
54627677f7fdSAxel Rasmussen static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
54637677f7fdSAxel Rasmussen 						  struct address_space *mapping,
54647677f7fdSAxel Rasmussen 						  pgoff_t idx,
54657677f7fdSAxel Rasmussen 						  unsigned int flags,
54667677f7fdSAxel Rasmussen 						  unsigned long haddr,
5467824ddc60SNadav Amit 						  unsigned long addr,
54687677f7fdSAxel Rasmussen 						  unsigned long reason)
54697677f7fdSAxel Rasmussen {
54707677f7fdSAxel Rasmussen 	vm_fault_t ret;
54717677f7fdSAxel Rasmussen 	u32 hash;
54727677f7fdSAxel Rasmussen 	struct vm_fault vmf = {
54737677f7fdSAxel Rasmussen 		.vma = vma,
54747677f7fdSAxel Rasmussen 		.address = haddr,
5475824ddc60SNadav Amit 		.real_address = addr,
54767677f7fdSAxel Rasmussen 		.flags = flags,
54777677f7fdSAxel Rasmussen 
54787677f7fdSAxel Rasmussen 		/*
54797677f7fdSAxel Rasmussen 		 * Hard to debug if it ends up being
54807677f7fdSAxel Rasmussen 		 * used by a callee that assumes
54817677f7fdSAxel Rasmussen 		 * something about the other
54827677f7fdSAxel Rasmussen 		 * uninitialized fields... same as in
54837677f7fdSAxel Rasmussen 		 * memory.c
54847677f7fdSAxel Rasmussen 		 */
54857677f7fdSAxel Rasmussen 	};
54867677f7fdSAxel Rasmussen 
54877677f7fdSAxel Rasmussen 	/*
54887677f7fdSAxel Rasmussen 	 * hugetlb_fault_mutex and i_mmap_rwsem must be
54897677f7fdSAxel Rasmussen 	 * dropped before handling userfault.  Reacquire
54907677f7fdSAxel Rasmussen 	 * after handling fault to make calling code simpler.
54917677f7fdSAxel Rasmussen 	 */
54927677f7fdSAxel Rasmussen 	hash = hugetlb_fault_mutex_hash(mapping, idx);
54937677f7fdSAxel Rasmussen 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
54947677f7fdSAxel Rasmussen 	ret = handle_userfault(&vmf, reason);
54957677f7fdSAxel Rasmussen 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
54967677f7fdSAxel Rasmussen 
54977677f7fdSAxel Rasmussen 	return ret;
54987677f7fdSAxel Rasmussen }
54997677f7fdSAxel Rasmussen 
55002b740303SSouptick Joarder static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
55012b740303SSouptick Joarder 			struct vm_area_struct *vma,
55028382d914SDavidlohr Bueso 			struct address_space *mapping, pgoff_t idx,
5503c64e912cSPeter Xu 			unsigned long address, pte_t *ptep,
5504c64e912cSPeter Xu 			pte_t old_pte, unsigned int flags)
5505ac9b9c66SHugh Dickins {
5506a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
55072b740303SSouptick Joarder 	vm_fault_t ret = VM_FAULT_SIGBUS;
5508409eb8c2SHillf Danton 	int anon_rmap = 0;
55094c887265SAdam Litke 	unsigned long size;
55104c887265SAdam Litke 	struct page *page;
55111e8f889bSDavid Gibson 	pte_t new_pte;
5512cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
5513285b8dcaSHuang Ying 	unsigned long haddr = address & huge_page_mask(h);
5514c7b1850dSMike Kravetz 	bool new_page, new_pagecache_page = false;
55154c887265SAdam Litke 
551604f2cbe3SMel Gorman 	/*
551704f2cbe3SMel Gorman 	 * Currently, we are forced to kill the process in the event the
551804f2cbe3SMel Gorman 	 * original mapper has unmapped pages from the child due to a failed
5519c89357e2SDavid Hildenbrand 	 * COW/unsharing. Warn that such a situation has occurred as it may not
5520c89357e2SDavid Hildenbrand 	 * be obvious.
552104f2cbe3SMel Gorman 	 */
552204f2cbe3SMel Gorman 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
5523910154d5SGeoffrey Thomas 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
552404f2cbe3SMel Gorman 			   current->pid);
552504f2cbe3SMel Gorman 		return ret;
552604f2cbe3SMel Gorman 	}
552704f2cbe3SMel Gorman 
55284c887265SAdam Litke 	/*
5529188a3972SMike Kravetz 	 * Use page lock to guard against racing truncation
5530188a3972SMike Kravetz 	 * before we get page_table_lock.
55314c887265SAdam Litke 	 */
5532c7b1850dSMike Kravetz 	new_page = false;
553387bf91d3SMike Kravetz 	page = find_lock_page(mapping, idx);
553487bf91d3SMike Kravetz 	if (!page) {
5535188a3972SMike Kravetz 		size = i_size_read(mapping->host) >> huge_page_shift(h);
5536188a3972SMike Kravetz 		if (idx >= size)
5537188a3972SMike Kravetz 			goto out;
55387677f7fdSAxel Rasmussen 		/* Check for page in userfault range */
55391a1aad8aSMike Kravetz 		if (userfaultfd_missing(vma)) {
55407677f7fdSAxel Rasmussen 			ret = hugetlb_handle_userfault(vma, mapping, idx,
5541824ddc60SNadav Amit 						       flags, haddr, address,
55427677f7fdSAxel Rasmussen 						       VM_UFFD_MISSING);
55431a1aad8aSMike Kravetz 			goto out;
55441a1aad8aSMike Kravetz 		}
55451a1aad8aSMike Kravetz 
5546285b8dcaSHuang Ying 		page = alloc_huge_page(vma, haddr, 0);
55472fc39cecSAdam Litke 		if (IS_ERR(page)) {
55484643d67eSMike Kravetz 			/*
55494643d67eSMike Kravetz 			 * Returning error will result in faulting task being
55504643d67eSMike Kravetz 			 * sent SIGBUS.  The hugetlb fault mutex prevents two
55514643d67eSMike Kravetz 			 * tasks from racing to fault in the same page which
55524643d67eSMike Kravetz 			 * could result in false unable to allocate errors.
55534643d67eSMike Kravetz 			 * Page migration does not take the fault mutex, but
55544643d67eSMike Kravetz 			 * does a clear then write of pte's under page table
55554643d67eSMike Kravetz 			 * lock.  Page fault code could race with migration,
55564643d67eSMike Kravetz 			 * notice the clear pte and try to allocate a page
55574643d67eSMike Kravetz 			 * here.  Before returning error, get ptl and make
55584643d67eSMike Kravetz 			 * sure there really is no pte entry.
55594643d67eSMike Kravetz 			 */
55604643d67eSMike Kravetz 			ptl = huge_pte_lock(h, mm, ptep);
55614643d67eSMike Kravetz 			ret = 0;
5562d83e6c8aSMiaohe Lin 			if (huge_pte_none(huge_ptep_get(ptep)))
55632b740303SSouptick Joarder 				ret = vmf_error(PTR_ERR(page));
5564d83e6c8aSMiaohe Lin 			spin_unlock(ptl);
55656bda666aSChristoph Lameter 			goto out;
55666bda666aSChristoph Lameter 		}
556747ad8475SAndrea Arcangeli 		clear_huge_page(page, address, pages_per_huge_page(h));
55680ed361deSNick Piggin 		__SetPageUptodate(page);
5569cb6acd01SMike Kravetz 		new_page = true;
5570ac9b9c66SHugh Dickins 
5571f83a275dSMel Gorman 		if (vma->vm_flags & VM_MAYSHARE) {
5572ab76ad54SMike Kravetz 			int err = huge_add_to_page_cache(page, mapping, idx);
55736bda666aSChristoph Lameter 			if (err) {
55743a5497a2SMiaohe Lin 				/*
55753a5497a2SMiaohe Lin 				 * err can't be -EEXIST which implies someone
55763a5497a2SMiaohe Lin 				 * else consumed the reservation since hugetlb
55773a5497a2SMiaohe Lin 				 * fault mutex is held when adding a hugetlb page
55783a5497a2SMiaohe Lin 				 * to the page cache. So it's safe to call
55793a5497a2SMiaohe Lin 				 * restore_reserve_on_error() here.
55803a5497a2SMiaohe Lin 				 */
55813a5497a2SMiaohe Lin 				restore_reserve_on_error(h, vma, haddr, page);
55826bda666aSChristoph Lameter 				put_page(page);
55836bda666aSChristoph Lameter 				goto out;
55846bda666aSChristoph Lameter 			}
5585c7b1850dSMike Kravetz 			new_pagecache_page = true;
558623be7468SMel Gorman 		} else {
55876bda666aSChristoph Lameter 			lock_page(page);
55880fe6e20bSNaoya Horiguchi 			if (unlikely(anon_vma_prepare(vma))) {
55890fe6e20bSNaoya Horiguchi 				ret = VM_FAULT_OOM;
55900fe6e20bSNaoya Horiguchi 				goto backout_unlocked;
559123be7468SMel Gorman 			}
5592409eb8c2SHillf Danton 			anon_rmap = 1;
55930fe6e20bSNaoya Horiguchi 		}
55940fe6e20bSNaoya Horiguchi 	} else {
559557303d80SAndy Whitcroft 		/*
5596998b4382SNaoya Horiguchi 		 * If a memory error occurs between mmap() and fault, some processes
5597998b4382SNaoya Horiguchi 		 * may not have a hwpoisoned swap entry for the errored virtual address.
5598998b4382SNaoya Horiguchi 		 * So we need to block hugepage fault by PG_hwpoison bit check.
5599fd6a03edSNaoya Horiguchi 		 */
5600fd6a03edSNaoya Horiguchi 		if (unlikely(PageHWPoison(page))) {
56010eb98f15SMiaohe Lin 			ret = VM_FAULT_HWPOISON_LARGE |
5602972dc4deSAneesh Kumar K.V 				VM_FAULT_SET_HINDEX(hstate_index(h));
5603fd6a03edSNaoya Horiguchi 			goto backout_unlocked;
56046bda666aSChristoph Lameter 		}
56057677f7fdSAxel Rasmussen 
56067677f7fdSAxel Rasmussen 		/* Check for page in userfault range. */
56077677f7fdSAxel Rasmussen 		if (userfaultfd_minor(vma)) {
56087677f7fdSAxel Rasmussen 			unlock_page(page);
56097677f7fdSAxel Rasmussen 			put_page(page);
56107677f7fdSAxel Rasmussen 			ret = hugetlb_handle_userfault(vma, mapping, idx,
5611824ddc60SNadav Amit 						       flags, haddr, address,
56127677f7fdSAxel Rasmussen 						       VM_UFFD_MINOR);
56137677f7fdSAxel Rasmussen 			goto out;
56147677f7fdSAxel Rasmussen 		}
5615998b4382SNaoya Horiguchi 	}
56161e8f889bSDavid Gibson 
561757303d80SAndy Whitcroft 	/*
561857303d80SAndy Whitcroft 	 * If we are going to COW a private mapping later, we examine the
561957303d80SAndy Whitcroft 	 * pending reservations for this page now. This will ensure that
562057303d80SAndy Whitcroft 	 * any allocations necessary to record that reservation occur outside
562157303d80SAndy Whitcroft 	 * the spinlock.
562257303d80SAndy Whitcroft 	 */
56235e911373SMike Kravetz 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5624285b8dcaSHuang Ying 		if (vma_needs_reservation(h, vma, haddr) < 0) {
56252b26736cSAndy Whitcroft 			ret = VM_FAULT_OOM;
56262b26736cSAndy Whitcroft 			goto backout_unlocked;
56272b26736cSAndy Whitcroft 		}
56285e911373SMike Kravetz 		/* Just decrements count, does not deallocate */
5629285b8dcaSHuang Ying 		vma_end_reservation(h, vma, haddr);
56305e911373SMike Kravetz 	}
563157303d80SAndy Whitcroft 
56328bea8052SAneesh Kumar K.V 	ptl = huge_pte_lock(h, mm, ptep);
5633188a3972SMike Kravetz 	size = i_size_read(mapping->host) >> huge_page_shift(h);
5634188a3972SMike Kravetz 	if (idx >= size)
5635188a3972SMike Kravetz 		goto backout;
5636188a3972SMike Kravetz 
563783c54070SNick Piggin 	ret = 0;
5638c64e912cSPeter Xu 	/* If pte changed from under us, retry */
5639c64e912cSPeter Xu 	if (!pte_same(huge_ptep_get(ptep), old_pte))
56404c887265SAdam Litke 		goto backout;
56414c887265SAdam Litke 
564207443a85SJoonsoo Kim 	if (anon_rmap) {
5643d6995da3SMike Kravetz 		ClearHPageRestoreReserve(page);
5644285b8dcaSHuang Ying 		hugepage_add_new_anon_rmap(page, vma, haddr);
5645ac714904SChoi Gi-yong 	} else
5646fb3d824dSDavid Hildenbrand 		page_dup_file_rmap(page, true);
56471e8f889bSDavid Gibson 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
56481e8f889bSDavid Gibson 				&& (vma->vm_flags & VM_SHARED)));
5649c64e912cSPeter Xu 	/*
5650c64e912cSPeter Xu 	 * If this pte was previously wr-protected, keep it wr-protected even
5651c64e912cSPeter Xu 	 * if populated.
5652c64e912cSPeter Xu 	 */
5653c64e912cSPeter Xu 	if (unlikely(pte_marker_uffd_wp(old_pte)))
5654c64e912cSPeter Xu 		new_pte = huge_pte_wrprotect(huge_pte_mkuffd_wp(new_pte));
5655285b8dcaSHuang Ying 	set_huge_pte_at(mm, haddr, ptep, new_pte);
56561e8f889bSDavid Gibson 
56575d317b2bSNaoya Horiguchi 	hugetlb_count_add(pages_per_huge_page(h), mm);
5658788c7df4SHugh Dickins 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
56591e8f889bSDavid Gibson 		/* Optimization, do the COW without a second fault */
5660c89357e2SDavid Hildenbrand 		ret = hugetlb_wp(mm, vma, address, ptep, flags, page, ptl);
56611e8f889bSDavid Gibson 	}
56621e8f889bSDavid Gibson 
5663cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
5664cb6acd01SMike Kravetz 
5665cb6acd01SMike Kravetz 	/*
56668f251a3dSMike Kravetz 	 * Only set HPageMigratable in newly allocated pages.  Existing pages
56678f251a3dSMike Kravetz 	 * found in the pagecache may not have HPageMigratable set if they have
56688f251a3dSMike Kravetz 	 * been isolated for migration.
5669cb6acd01SMike Kravetz 	 */
5670cb6acd01SMike Kravetz 	if (new_page)
56718f251a3dSMike Kravetz 		SetHPageMigratable(page);
5672cb6acd01SMike Kravetz 
56734c887265SAdam Litke 	unlock_page(page);
56744c887265SAdam Litke out:
5675ac9b9c66SHugh Dickins 	return ret;
56764c887265SAdam Litke 
56774c887265SAdam Litke backout:
5678cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
56792b26736cSAndy Whitcroft backout_unlocked:
56804c887265SAdam Litke 	unlock_page(page);
5681c7b1850dSMike Kravetz 	/* restore reserve for newly allocated pages not in page cache */
5682c7b1850dSMike Kravetz 	if (new_page && !new_pagecache_page)
5683285b8dcaSHuang Ying 		restore_reserve_on_error(h, vma, haddr, page);
56844c887265SAdam Litke 	put_page(page);
56854c887265SAdam Litke 	goto out;
5686ac9b9c66SHugh Dickins }
5687ac9b9c66SHugh Dickins 
56888382d914SDavidlohr Bueso #ifdef CONFIG_SMP
5689188b04a7SWei Yang u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
56908382d914SDavidlohr Bueso {
56918382d914SDavidlohr Bueso 	unsigned long key[2];
56928382d914SDavidlohr Bueso 	u32 hash;
56938382d914SDavidlohr Bueso 
56948382d914SDavidlohr Bueso 	key[0] = (unsigned long) mapping;
56958382d914SDavidlohr Bueso 	key[1] = idx;
56968382d914SDavidlohr Bueso 
569755254636SMike Kravetz 	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
56988382d914SDavidlohr Bueso 
56998382d914SDavidlohr Bueso 	return hash & (num_fault_mutexes - 1);
57008382d914SDavidlohr Bueso }
57018382d914SDavidlohr Bueso #else
57028382d914SDavidlohr Bueso /*
57036c26d310SMiaohe Lin  * For uniprocessor systems we always use a single mutex, so just
57048382d914SDavidlohr Bueso  * return 0 and avoid the hashing overhead.
57058382d914SDavidlohr Bueso  */
5706188b04a7SWei Yang u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
57078382d914SDavidlohr Bueso {
57088382d914SDavidlohr Bueso 	return 0;
57098382d914SDavidlohr Bueso }
57108382d914SDavidlohr Bueso #endif
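
/*
 * Illustrative sketch, not part of the kernel source: the serialization
 * pattern built on hugetlb_fault_mutex_hash(), as used by hugetlb_fault()
 * below and by remove_inode_hugepages() in hugetlbfs.  The helper name is
 * hypothetical and only shows the intended lock usage.
 */
static inline void example_fault_mutex_usage(struct address_space *mapping,
					     pgoff_t idx)
{
	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);

	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	/* ... fault in or remove the page cache page at index 'idx' ... */
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}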
57118382d914SDavidlohr Bueso 
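/*
 * Top-level hugetlb page fault handler: serializes concurrent faults on
 * the same (mapping, index) via hugetlb_fault_mutex_table, waits on
 * migration entries, dispatches none ptes to hugetlb_no_page(), and
 * handles write/unshare faults on present ptes via hugetlb_wp() under
 * the page table lock.
 */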
57122b740303SSouptick Joarder vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
5713788c7df4SHugh Dickins 			unsigned long address, unsigned int flags)
571486e5216fSAdam Litke {
57158382d914SDavidlohr Bueso 	pte_t *ptep, entry;
5716cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
57172b740303SSouptick Joarder 	vm_fault_t ret;
57188382d914SDavidlohr Bueso 	u32 hash;
57198382d914SDavidlohr Bueso 	pgoff_t idx;
57200fe6e20bSNaoya Horiguchi 	struct page *page = NULL;
572157303d80SAndy Whitcroft 	struct page *pagecache_page = NULL;
5722a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
57238382d914SDavidlohr Bueso 	struct address_space *mapping;
57240f792cf9SNaoya Horiguchi 	int need_wait_lock = 0;
5725285b8dcaSHuang Ying 	unsigned long haddr = address & huge_page_mask(h);
572686e5216fSAdam Litke 
5727285b8dcaSHuang Ying 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
5728fd6a03edSNaoya Horiguchi 	if (ptep) {
5729fd6a03edSNaoya Horiguchi 		entry = huge_ptep_get(ptep);
5730290408d4SNaoya Horiguchi 		if (unlikely(is_hugetlb_entry_migration(entry))) {
5731ad1ac596SMiaohe Lin 			migration_entry_wait_huge(vma, ptep);
5732290408d4SNaoya Horiguchi 			return 0;
5733290408d4SNaoya Horiguchi 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
5734aa50d3a7SAndi Kleen 			return VM_FAULT_HWPOISON_LARGE |
5735972dc4deSAneesh Kumar K.V 				VM_FAULT_SET_HINDEX(hstate_index(h));
5736*3a47c54fSMike Kravetz 	} else {
5737aec44e0fSPeter Xu 		ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
5738*3a47c54fSMike Kravetz 		if (!ptep)
5739c0d0381aSMike Kravetz 			return VM_FAULT_OOM;
5740c0d0381aSMike Kravetz 	}
5741ddeaab32SMike Kravetz 
5742*3a47c54fSMike Kravetz 	mapping = vma->vm_file->f_mapping;
5743*3a47c54fSMike Kravetz 	idx = vma_hugecache_offset(h, vma, haddr);
5744*3a47c54fSMike Kravetz 
57453935baa9SDavid Gibson 	/*
57463935baa9SDavid Gibson 	 * Serialize hugepage allocation and instantiation, so that we don't
57473935baa9SDavid Gibson 	 * get spurious allocation failures if two CPUs race to instantiate
57483935baa9SDavid Gibson 	 * the same page in the page cache.
57493935baa9SDavid Gibson 	 */
5750188b04a7SWei Yang 	hash = hugetlb_fault_mutex_hash(mapping, idx);
5751c672c7f2SMike Kravetz 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
57528382d914SDavidlohr Bueso 
57537f2e9525SGerald Schaefer 	entry = huge_ptep_get(ptep);
5754c64e912cSPeter Xu 	/* PTE markers should be handled the same way as none pte */
5755c64e912cSPeter Xu 	if (huge_pte_none_mostly(entry)) {
5756c64e912cSPeter Xu 		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
5757c64e912cSPeter Xu 				      entry, flags);
5758b4d1d99fSDavid Gibson 		goto out_mutex;
57593935baa9SDavid Gibson 	}
576086e5216fSAdam Litke 
576183c54070SNick Piggin 	ret = 0;
57621e8f889bSDavid Gibson 
576357303d80SAndy Whitcroft 	/*
57640f792cf9SNaoya Horiguchi 	 * entry could be a migration/hwpoison entry at this point, so this
57650f792cf9SNaoya Horiguchi 	 * check prevents the kernel from going below and assuming that we have
57667c8de358SEthon Paul 	 * an active hugepage in the pagecache. This goto expects the 2nd page
57677c8de358SEthon Paul 	 * fault, where the is_hugetlb_entry_(migration|hwpoisoned) check will
57687c8de358SEthon Paul 	 * handle it properly.
57690f792cf9SNaoya Horiguchi 	 */
57700f792cf9SNaoya Horiguchi 	if (!pte_present(entry))
57710f792cf9SNaoya Horiguchi 		goto out_mutex;
57720f792cf9SNaoya Horiguchi 
57730f792cf9SNaoya Horiguchi 	/*
5774c89357e2SDavid Hildenbrand 	 * If we are going to COW/unshare the mapping later, we examine the
5775c89357e2SDavid Hildenbrand 	 * pending reservations for this page now. This will ensure that any
577657303d80SAndy Whitcroft 	 * allocations necessary to record that reservation occur outside the
57771d8d1464SDavid Hildenbrand 	 * spinlock. Also lookup the pagecache page now as it is used to
57781d8d1464SDavid Hildenbrand 	 * determine if a reservation has been consumed.
577957303d80SAndy Whitcroft 	 */
5780c89357e2SDavid Hildenbrand 	if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
57811d8d1464SDavid Hildenbrand 	    !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) {
5782285b8dcaSHuang Ying 		if (vma_needs_reservation(h, vma, haddr) < 0) {
57832b26736cSAndy Whitcroft 			ret = VM_FAULT_OOM;
5784b4d1d99fSDavid Gibson 			goto out_mutex;
57852b26736cSAndy Whitcroft 		}
57865e911373SMike Kravetz 		/* Just decrements count, does not deallocate */
5787285b8dcaSHuang Ying 		vma_end_reservation(h, vma, haddr);
578857303d80SAndy Whitcroft 
578929be8426SMiaohe Lin 		pagecache_page = find_lock_page(mapping, idx);
579057303d80SAndy Whitcroft 	}
579157303d80SAndy Whitcroft 
57920f792cf9SNaoya Horiguchi 	ptl = huge_pte_lock(h, mm, ptep);
57930fe6e20bSNaoya Horiguchi 
5794c89357e2SDavid Hildenbrand 	/* Check for a racing update before calling hugetlb_wp() */
5795b4d1d99fSDavid Gibson 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
5796cb900f41SKirill A. Shutemov 		goto out_ptl;
5797b4d1d99fSDavid Gibson 
5798166f3eccSPeter Xu 	/* Handle userfault-wp first, before trying to lock more pages */
5799166f3eccSPeter Xu 	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
5800166f3eccSPeter Xu 	    (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
5801166f3eccSPeter Xu 		struct vm_fault vmf = {
5802166f3eccSPeter Xu 			.vma = vma,
5803166f3eccSPeter Xu 			.address = haddr,
5804166f3eccSPeter Xu 			.real_address = address,
5805166f3eccSPeter Xu 			.flags = flags,
5806166f3eccSPeter Xu 		};
5807166f3eccSPeter Xu 
5808166f3eccSPeter Xu 		spin_unlock(ptl);
5809166f3eccSPeter Xu 		if (pagecache_page) {
5810166f3eccSPeter Xu 			unlock_page(pagecache_page);
5811166f3eccSPeter Xu 			put_page(pagecache_page);
5812166f3eccSPeter Xu 		}
5813166f3eccSPeter Xu 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5814166f3eccSPeter Xu 		return handle_userfault(&vmf, VM_UFFD_WP);
5815166f3eccSPeter Xu 	}
5816166f3eccSPeter Xu 
58170f792cf9SNaoya Horiguchi 	/*
5818c89357e2SDavid Hildenbrand 	 * hugetlb_wp() requires page locks of pte_page(entry) and
58190f792cf9SNaoya Horiguchi 	 * pagecache_page, so here we need take the former one
58200f792cf9SNaoya Horiguchi 	 * when page != pagecache_page or !pagecache_page.
58210f792cf9SNaoya Horiguchi 	 */
58220f792cf9SNaoya Horiguchi 	page = pte_page(entry);
58230f792cf9SNaoya Horiguchi 	if (page != pagecache_page)
58240f792cf9SNaoya Horiguchi 		if (!trylock_page(page)) {
58250f792cf9SNaoya Horiguchi 			need_wait_lock = 1;
58260f792cf9SNaoya Horiguchi 			goto out_ptl;
58270f792cf9SNaoya Horiguchi 		}
58280f792cf9SNaoya Horiguchi 
58290f792cf9SNaoya Horiguchi 	get_page(page);
5830b4d1d99fSDavid Gibson 
5831c89357e2SDavid Hildenbrand 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
5832106c992aSGerald Schaefer 		if (!huge_pte_write(entry)) {
5833c89357e2SDavid Hildenbrand 			ret = hugetlb_wp(mm, vma, address, ptep, flags,
5834cb900f41SKirill A. Shutemov 					 pagecache_page, ptl);
58350f792cf9SNaoya Horiguchi 			goto out_put_page;
5836c89357e2SDavid Hildenbrand 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
5837106c992aSGerald Schaefer 			entry = huge_pte_mkdirty(entry);
5838b4d1d99fSDavid Gibson 		}
5839c89357e2SDavid Hildenbrand 	}
5840b4d1d99fSDavid Gibson 	entry = pte_mkyoung(entry);
5841285b8dcaSHuang Ying 	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
5842788c7df4SHugh Dickins 						flags & FAULT_FLAG_WRITE))
5843285b8dcaSHuang Ying 		update_mmu_cache(vma, haddr, ptep);
58440f792cf9SNaoya Horiguchi out_put_page:
58450f792cf9SNaoya Horiguchi 	if (page != pagecache_page)
58460f792cf9SNaoya Horiguchi 		unlock_page(page);
58470f792cf9SNaoya Horiguchi 	put_page(page);
5848cb900f41SKirill A. Shutemov out_ptl:
5849cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
585057303d80SAndy Whitcroft 
585157303d80SAndy Whitcroft 	if (pagecache_page) {
585257303d80SAndy Whitcroft 		unlock_page(pagecache_page);
585357303d80SAndy Whitcroft 		put_page(pagecache_page);
585457303d80SAndy Whitcroft 	}
5855b4d1d99fSDavid Gibson out_mutex:
5856c672c7f2SMike Kravetz 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
58570f792cf9SNaoya Horiguchi 	/*
58580f792cf9SNaoya Horiguchi 	 * Generally it is safe to hold a refcount while waiting for the page
58590f792cf9SNaoya Horiguchi 	 * lock. But here we only wait to defer the next page fault and avoid a
58600f792cf9SNaoya Horiguchi 	 * busy loop; the page is not used after it is unlocked before we return
58610f792cf9SNaoya Horiguchi 	 * from the current page fault. So we are safe from accessing a freed
58620f792cf9SNaoya Horiguchi 	 * page, even if we wait here without taking a refcount.
58630f792cf9SNaoya Horiguchi 	 */
58640f792cf9SNaoya Horiguchi 	if (need_wait_lock)
58650f792cf9SNaoya Horiguchi 		wait_on_page_locked(page);
58661e8f889bSDavid Gibson 	return ret;
586786e5216fSAdam Litke }
586886e5216fSAdam Litke 
5869714c1891SAxel Rasmussen #ifdef CONFIG_USERFAULTFD
58708fb5debcSMike Kravetz /*
58718fb5debcSMike Kravetz  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
58728fb5debcSMike Kravetz  * modifications for huge pages.
58738fb5debcSMike Kravetz  */
58748fb5debcSMike Kravetz int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
58758fb5debcSMike Kravetz 			    pte_t *dst_pte,
58768fb5debcSMike Kravetz 			    struct vm_area_struct *dst_vma,
58778fb5debcSMike Kravetz 			    unsigned long dst_addr,
58788fb5debcSMike Kravetz 			    unsigned long src_addr,
5879f6191471SAxel Rasmussen 			    enum mcopy_atomic_mode mode,
58806041c691SPeter Xu 			    struct page **pagep,
58816041c691SPeter Xu 			    bool wp_copy)
58828fb5debcSMike Kravetz {
5883f6191471SAxel Rasmussen 	bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
58848cc5fcbbSMina Almasry 	struct hstate *h = hstate_vma(dst_vma);
58858cc5fcbbSMina Almasry 	struct address_space *mapping = dst_vma->vm_file->f_mapping;
58868cc5fcbbSMina Almasry 	pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
58871e392147SAndrea Arcangeli 	unsigned long size;
58881c9e8defSMike Kravetz 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
58898fb5debcSMike Kravetz 	pte_t _dst_pte;
58908fb5debcSMike Kravetz 	spinlock_t *ptl;
58918cc5fcbbSMina Almasry 	int ret = -ENOMEM;
58928fb5debcSMike Kravetz 	struct page *page;
5893f6191471SAxel Rasmussen 	int writable;
5894cc30042dSMina Almasry 	bool page_in_pagecache = false;
58958fb5debcSMike Kravetz 
5896f6191471SAxel Rasmussen 	if (is_continue) {
5897f6191471SAxel Rasmussen 		ret = -EFAULT;
5898f6191471SAxel Rasmussen 		page = find_lock_page(mapping, idx);
5899f6191471SAxel Rasmussen 		if (!page)
5900f6191471SAxel Rasmussen 			goto out;
5901cc30042dSMina Almasry 		page_in_pagecache = true;
5902f6191471SAxel Rasmussen 	} else if (!*pagep) {
5903d84cf06eSMina Almasry 		/* If a page already exists, then it's UFFDIO_COPY for
5904d84cf06eSMina Almasry 		 * a non-missing case. Return -EEXIST.
5905d84cf06eSMina Almasry 		 */
5906d84cf06eSMina Almasry 		if (vm_shared &&
5907d84cf06eSMina Almasry 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
5908d84cf06eSMina Almasry 			ret = -EEXIST;
59098fb5debcSMike Kravetz 			goto out;
5910d84cf06eSMina Almasry 		}
5911d84cf06eSMina Almasry 
5912d84cf06eSMina Almasry 		page = alloc_huge_page(dst_vma, dst_addr, 0);
5913d84cf06eSMina Almasry 		if (IS_ERR(page)) {
5914d84cf06eSMina Almasry 			ret = -ENOMEM;
5915d84cf06eSMina Almasry 			goto out;
5916d84cf06eSMina Almasry 		}
59178fb5debcSMike Kravetz 
59188fb5debcSMike Kravetz 		ret = copy_huge_page_from_user(page,
59198fb5debcSMike Kravetz 						(const void __user *) src_addr,
5920810a56b9SMike Kravetz 						pages_per_huge_page(h), false);
59218fb5debcSMike Kravetz 
5922c1e8d7c6SMichel Lespinasse 		/* fallback to copy_from_user outside mmap_lock */
59238fb5debcSMike Kravetz 		if (unlikely(ret)) {
59249e368259SAndrea Arcangeli 			ret = -ENOENT;
59258cc5fcbbSMina Almasry 			/* Free the allocated page which may have
59268cc5fcbbSMina Almasry 			 * consumed a reservation.
59278cc5fcbbSMina Almasry 			 */
59288cc5fcbbSMina Almasry 			restore_reserve_on_error(h, dst_vma, dst_addr, page);
59298cc5fcbbSMina Almasry 			put_page(page);
59308cc5fcbbSMina Almasry 
59318cc5fcbbSMina Almasry 			/* Allocate a temporary page to hold the copied
59328cc5fcbbSMina Almasry 			 * contents.
59338cc5fcbbSMina Almasry 			 */
59348cc5fcbbSMina Almasry 			page = alloc_huge_page_vma(h, dst_vma, dst_addr);
59358cc5fcbbSMina Almasry 			if (!page) {
59368cc5fcbbSMina Almasry 				ret = -ENOMEM;
59378cc5fcbbSMina Almasry 				goto out;
59388cc5fcbbSMina Almasry 			}
59398fb5debcSMike Kravetz 			*pagep = page;
59408cc5fcbbSMina Almasry 			/* Set the outparam pagep and return to the caller to
59418cc5fcbbSMina Almasry 			 * copy the contents outside the lock. Don't free the
59428cc5fcbbSMina Almasry 			 * page.
59438cc5fcbbSMina Almasry 			 */
59448fb5debcSMike Kravetz 			goto out;
59458fb5debcSMike Kravetz 		}
59468fb5debcSMike Kravetz 	} else {
59478cc5fcbbSMina Almasry 		if (vm_shared &&
59488cc5fcbbSMina Almasry 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
59498cc5fcbbSMina Almasry 			put_page(*pagep);
59508cc5fcbbSMina Almasry 			ret = -EEXIST;
59518cc5fcbbSMina Almasry 			*pagep = NULL;
59528cc5fcbbSMina Almasry 			goto out;
59538cc5fcbbSMina Almasry 		}
59548cc5fcbbSMina Almasry 
59558cc5fcbbSMina Almasry 		page = alloc_huge_page(dst_vma, dst_addr, 0);
59568cc5fcbbSMina Almasry 		if (IS_ERR(page)) {
5957da9a298fSMiaohe Lin 			put_page(*pagep);
59588cc5fcbbSMina Almasry 			ret = -ENOMEM;
59598cc5fcbbSMina Almasry 			*pagep = NULL;
59608cc5fcbbSMina Almasry 			goto out;
59618cc5fcbbSMina Almasry 		}
596234892366SMuchun Song 		copy_user_huge_page(page, *pagep, dst_addr, dst_vma,
596334892366SMuchun Song 				    pages_per_huge_page(h));
59648cc5fcbbSMina Almasry 		put_page(*pagep);
59658fb5debcSMike Kravetz 		*pagep = NULL;
59668fb5debcSMike Kravetz 	}
59678fb5debcSMike Kravetz 
59688fb5debcSMike Kravetz 	/*
59698fb5debcSMike Kravetz 	 * The memory barrier inside __SetPageUptodate makes sure that
59708fb5debcSMike Kravetz 	 * preceding stores to the page contents become visible before
59718fb5debcSMike Kravetz 	 * the set_pte_at() write.
59728fb5debcSMike Kravetz 	 */
59738fb5debcSMike Kravetz 	__SetPageUptodate(page);
59748fb5debcSMike Kravetz 
5975f6191471SAxel Rasmussen 	/* Add shared, newly allocated pages to the page cache. */
5976f6191471SAxel Rasmussen 	if (vm_shared && !is_continue) {
59771e392147SAndrea Arcangeli 		size = i_size_read(mapping->host) >> huge_page_shift(h);
59781e392147SAndrea Arcangeli 		ret = -EFAULT;
59791e392147SAndrea Arcangeli 		if (idx >= size)
59801e392147SAndrea Arcangeli 			goto out_release_nounlock;
59811c9e8defSMike Kravetz 
59821e392147SAndrea Arcangeli 		/*
59831e392147SAndrea Arcangeli 		 * Serialization between remove_inode_hugepages() and
59841e392147SAndrea Arcangeli 		 * huge_add_to_page_cache() below happens through the
59851e392147SAndrea Arcangeli 		 * hugetlb_fault_mutex_table that here must be held by
59861e392147SAndrea Arcangeli 		 * the caller.
59871e392147SAndrea Arcangeli 		 */
59881c9e8defSMike Kravetz 		ret = huge_add_to_page_cache(page, mapping, idx);
59891c9e8defSMike Kravetz 		if (ret)
59901c9e8defSMike Kravetz 			goto out_release_nounlock;
5991cc30042dSMina Almasry 		page_in_pagecache = true;
59921c9e8defSMike Kravetz 	}
59931c9e8defSMike Kravetz 
5994bcc66543SMiaohe Lin 	ptl = huge_pte_lock(h, dst_mm, dst_pte);
59958fb5debcSMike Kravetz 
59961e392147SAndrea Arcangeli 	/*
59971e392147SAndrea Arcangeli 	 * Recheck the i_size after holding PT lock to make sure not
59981e392147SAndrea Arcangeli 	 * to leave any page mapped (as page_mapped()) beyond the end
59991e392147SAndrea Arcangeli 	 * of the i_size (remove_inode_hugepages() is strict about
60001e392147SAndrea Arcangeli 	 * enforcing that). If we bail out here, we'll also leave a
60011e392147SAndrea Arcangeli 	 * page in the radix tree in the vm_shared case beyond the end
60021e392147SAndrea Arcangeli 	 * of the i_size, but remove_inode_hugepages() will take care
60031e392147SAndrea Arcangeli 	 * of it as soon as we drop the hugetlb_fault_mutex_table.
60041e392147SAndrea Arcangeli 	 */
60051e392147SAndrea Arcangeli 	size = i_size_read(mapping->host) >> huge_page_shift(h);
60061e392147SAndrea Arcangeli 	ret = -EFAULT;
60071e392147SAndrea Arcangeli 	if (idx >= size)
60081e392147SAndrea Arcangeli 		goto out_release_unlock;
60091e392147SAndrea Arcangeli 
60108fb5debcSMike Kravetz 	ret = -EEXIST;
60116041c691SPeter Xu 	/*
60126041c691SPeter Xu 	 * We allow overwriting a pte marker: consider the case where both
60136041c691SPeter Xu 	 * MISSING|WP are registered; we first wr-protect a none pte which has
60146041c691SPeter Xu 	 * no page cache page backing it, then access the page.
60156041c691SPeter Xu 	 */
60166041c691SPeter Xu 	if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
60178fb5debcSMike Kravetz 		goto out_release_unlock;
60188fb5debcSMike Kravetz 
6019ab74ef70SMiaohe Lin 	if (page_in_pagecache) {
6020fb3d824dSDavid Hildenbrand 		page_dup_file_rmap(page, true);
60211c9e8defSMike Kravetz 	} else {
6022d6995da3SMike Kravetz 		ClearHPageRestoreReserve(page);
60238fb5debcSMike Kravetz 		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
60241c9e8defSMike Kravetz 	}
60258fb5debcSMike Kravetz 
60266041c691SPeter Xu 	/*
60276041c691SPeter Xu 	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
60286041c691SPeter Xu 	 * with wp flag set, don't set pte write bit.
60296041c691SPeter Xu 	 */
60306041c691SPeter Xu 	if (wp_copy || (is_continue && !vm_shared))
6031f6191471SAxel Rasmussen 		writable = 0;
6032f6191471SAxel Rasmussen 	else
6033f6191471SAxel Rasmussen 		writable = dst_vma->vm_flags & VM_WRITE;
6034f6191471SAxel Rasmussen 
6035f6191471SAxel Rasmussen 	_dst_pte = make_huge_pte(dst_vma, page, writable);
60366041c691SPeter Xu 	/*
60376041c691SPeter Xu 	 * Always mark UFFDIO_COPY page dirty; note that this may not be
60386041c691SPeter Xu 	 * extremely important for hugetlbfs for now since swapping is not
60396041c691SPeter Xu 	 * supported, but we should still be clear that this page cannot be
60406041c691SPeter Xu 	 * thrown away at will, even if the write bit is not set.
60416041c691SPeter Xu 	 */
60428fb5debcSMike Kravetz 	_dst_pte = huge_pte_mkdirty(_dst_pte);
60438fb5debcSMike Kravetz 	_dst_pte = pte_mkyoung(_dst_pte);
60448fb5debcSMike Kravetz 
60456041c691SPeter Xu 	if (wp_copy)
60466041c691SPeter Xu 		_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
60476041c691SPeter Xu 
60488fb5debcSMike Kravetz 	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
60498fb5debcSMike Kravetz 
60508fb5debcSMike Kravetz 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
60518fb5debcSMike Kravetz 
60528fb5debcSMike Kravetz 	/* No need to invalidate - it was non-present before */
60538fb5debcSMike Kravetz 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
60548fb5debcSMike Kravetz 
60558fb5debcSMike Kravetz 	spin_unlock(ptl);
6056f6191471SAxel Rasmussen 	if (!is_continue)
60578f251a3dSMike Kravetz 		SetHPageMigratable(page);
6058f6191471SAxel Rasmussen 	if (vm_shared || is_continue)
60591c9e8defSMike Kravetz 		unlock_page(page);
60608fb5debcSMike Kravetz 	ret = 0;
60618fb5debcSMike Kravetz out:
60628fb5debcSMike Kravetz 	return ret;
60638fb5debcSMike Kravetz out_release_unlock:
60648fb5debcSMike Kravetz 	spin_unlock(ptl);
6065f6191471SAxel Rasmussen 	if (vm_shared || is_continue)
60661c9e8defSMike Kravetz 		unlock_page(page);
60675af10dfdSAndrea Arcangeli out_release_nounlock:
6068cc30042dSMina Almasry 	if (!page_in_pagecache)
6069846be085SMike Kravetz 		restore_reserve_on_error(h, dst_vma, dst_addr, page);
60708fb5debcSMike Kravetz 	put_page(page);
60718fb5debcSMike Kravetz 	goto out;
60728fb5debcSMike Kravetz }
6073714c1891SAxel Rasmussen #endif /* CONFIG_USERFAULTFD */
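
/*
 * Illustrative sketch, not part of the kernel source: the userspace side
 * that reaches hugetlb_mcopy_atomic_pte() above through the UFFDIO_COPY
 * ioctl.  'uffd', 'dst', 'src' and 'len' are hypothetical; the
 * destination is assumed to be a registered hugetlb range with 'dst' and
 * 'len' aligned to the huge page size.  Setting .mode to
 * UFFDIO_COPY_MODE_WP corresponds to the wp_copy case above.
 *
 *	struct uffdio_copy copy = {
 *		.dst  = dst,
 *		.src  = (unsigned long)src,
 *		.len  = len,
 *		.mode = 0,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
 *		perror("UFFDIO_COPY");
 */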
60748fb5debcSMike Kravetz 
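/*
 * Fill the next 'refs' slots of the GUP output arrays: pages[] receives
 * the consecutive subpages starting at 'page', and vmas[] receives the
 * VMA they were found in.  Either array may be NULL when the caller did
 * not ask for that information.
 */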
607582e5d378SJoao Martins static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
607682e5d378SJoao Martins 				 int refs, struct page **pages,
607782e5d378SJoao Martins 				 struct vm_area_struct **vmas)
607882e5d378SJoao Martins {
607982e5d378SJoao Martins 	int nr;
608082e5d378SJoao Martins 
608182e5d378SJoao Martins 	for (nr = 0; nr < refs; nr++) {
608282e5d378SJoao Martins 		if (likely(pages))
608314455eabSCheng Li 			pages[nr] = nth_page(page, nr);
608482e5d378SJoao Martins 		if (vmas)
608582e5d378SJoao Martins 			vmas[nr] = vma;
608682e5d378SJoao Martins 	}
608782e5d378SJoao Martins }
608882e5d378SJoao Martins 
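/*
 * Decide whether follow_hugetlb_page() must go through the fault path for
 * this pte rather than using it directly: true for swap (migration or
 * hwpoison) entries, for FOLL_WRITE on a non-writable pte, and when
 * gup_must_unshare() requires the page to be unshared first, in which
 * case *unshare is also set.
 */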
6089a7f22660SDavid Hildenbrand static inline bool __follow_hugetlb_must_fault(unsigned int flags, pte_t *pte,
6090a7f22660SDavid Hildenbrand 					       bool *unshare)
6091a7f22660SDavid Hildenbrand {
6092a7f22660SDavid Hildenbrand 	pte_t pteval = huge_ptep_get(pte);
6093a7f22660SDavid Hildenbrand 
6094a7f22660SDavid Hildenbrand 	*unshare = false;
6095a7f22660SDavid Hildenbrand 	if (is_swap_pte(pteval))
6096a7f22660SDavid Hildenbrand 		return true;
6097a7f22660SDavid Hildenbrand 	if (huge_pte_write(pteval))
6098a7f22660SDavid Hildenbrand 		return false;
6099a7f22660SDavid Hildenbrand 	if (flags & FOLL_WRITE)
6100a7f22660SDavid Hildenbrand 		return true;
6101a7f22660SDavid Hildenbrand 	if (gup_must_unshare(flags, pte_page(pteval))) {
6102a7f22660SDavid Hildenbrand 		*unshare = true;
6103a7f22660SDavid Hildenbrand 		return true;
6104a7f22660SDavid Hildenbrand 	}
6105a7f22660SDavid Hildenbrand 	return false;
6106a7f22660SDavid Hildenbrand }
6107a7f22660SDavid Hildenbrand 
610828a35716SMichel Lespinasse long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
610963551ae0SDavid Gibson 			 struct page **pages, struct vm_area_struct **vmas,
611028a35716SMichel Lespinasse 			 unsigned long *position, unsigned long *nr_pages,
61114f6da934SPeter Xu 			 long i, unsigned int flags, int *locked)
611263551ae0SDavid Gibson {
6113d5d4b0aaSChen, Kenneth W 	unsigned long pfn_offset;
6114d5d4b0aaSChen, Kenneth W 	unsigned long vaddr = *position;
611528a35716SMichel Lespinasse 	unsigned long remainder = *nr_pages;
6116a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
61170fa5bc40SJoao Martins 	int err = -EFAULT, refs;
611863551ae0SDavid Gibson 
611963551ae0SDavid Gibson 	while (vaddr < vma->vm_end && remainder) {
612063551ae0SDavid Gibson 		pte_t *pte;
6121cb900f41SKirill A. Shutemov 		spinlock_t *ptl = NULL;
6122a7f22660SDavid Hildenbrand 		bool unshare = false;
61232a15efc9SHugh Dickins 		int absent;
612463551ae0SDavid Gibson 		struct page *page;
612563551ae0SDavid Gibson 
61264c887265SAdam Litke 		/*
612702057967SDavid Rientjes 		 * If we have a pending SIGKILL, don't keep faulting pages and
612802057967SDavid Rientjes 		 * potentially allocating memory.
612902057967SDavid Rientjes 		 */
6130fa45f116SDavidlohr Bueso 		if (fatal_signal_pending(current)) {
613102057967SDavid Rientjes 			remainder = 0;
613202057967SDavid Rientjes 			break;
613302057967SDavid Rientjes 		}
613402057967SDavid Rientjes 
613502057967SDavid Rientjes 		/*
61364c887265SAdam Litke 		 * Some archs (sparc64, sh*) have multiple pte_ts to
61372a15efc9SHugh Dickins 		 * each hugepage.  We have to make sure we get the
61384c887265SAdam Litke 		 * first, for the page indexing below to work.
6139cb900f41SKirill A. Shutemov 		 *
6140cb900f41SKirill A. Shutemov 		 * Note that page table lock is not held when pte is null.
61414c887265SAdam Litke 		 */
61427868a208SPunit Agrawal 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
61437868a208SPunit Agrawal 				      huge_page_size(h));
6144cb900f41SKirill A. Shutemov 		if (pte)
6145cb900f41SKirill A. Shutemov 			ptl = huge_pte_lock(h, mm, pte);
61462a15efc9SHugh Dickins 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
614763551ae0SDavid Gibson 
61482a15efc9SHugh Dickins 		/*
61492a15efc9SHugh Dickins 		 * When coredumping, it suits get_dump_page if we just return
61503ae77f43SHugh Dickins 		 * an error where there's an empty slot with no huge pagecache
61513ae77f43SHugh Dickins 		 * to back it.  This way, we avoid allocating a hugepage, and
61523ae77f43SHugh Dickins 		 * the sparse dumpfile avoids allocating disk blocks, but its
61533ae77f43SHugh Dickins 		 * huge holes still show up with zeroes where they need to be.
61542a15efc9SHugh Dickins 		 */
61553ae77f43SHugh Dickins 		if (absent && (flags & FOLL_DUMP) &&
61563ae77f43SHugh Dickins 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
6157cb900f41SKirill A. Shutemov 			if (pte)
6158cb900f41SKirill A. Shutemov 				spin_unlock(ptl);
61592a15efc9SHugh Dickins 			remainder = 0;
61602a15efc9SHugh Dickins 			break;
61612a15efc9SHugh Dickins 		}
61622a15efc9SHugh Dickins 
61639cc3a5bdSNaoya Horiguchi 		/*
61649cc3a5bdSNaoya Horiguchi 		 * We need to call hugetlb_fault for both hugepages under migration
61659cc3a5bdSNaoya Horiguchi 		 * (in which case hugetlb_fault waits for the migration) and
61669cc3a5bdSNaoya Horiguchi 		 * hwpoisoned hugepages (in which case we need to prevent the
61679cc3a5bdSNaoya Horiguchi 		 * caller from accessing them). To do this, we use is_swap_pte
61689cc3a5bdSNaoya Horiguchi 		 * here instead of is_hugetlb_entry_migration and
61699cc3a5bdSNaoya Horiguchi 		 * is_hugetlb_entry_hwpoisoned, because it covers both cases and
61709cc3a5bdSNaoya Horiguchi 		 * because we can't follow correct pages directly from any kind
61719cc3a5bdSNaoya Horiguchi 		 * of swap entry.
61729cc3a5bdSNaoya Horiguchi 		 */
6173a7f22660SDavid Hildenbrand 		if (absent ||
6174a7f22660SDavid Hildenbrand 		    __follow_hugetlb_must_fault(flags, pte, &unshare)) {
61752b740303SSouptick Joarder 			vm_fault_t ret;
617687ffc118SAndrea Arcangeli 			unsigned int fault_flags = 0;
61774c887265SAdam Litke 
6178cb900f41SKirill A. Shutemov 			if (pte)
6179cb900f41SKirill A. Shutemov 				spin_unlock(ptl);
618087ffc118SAndrea Arcangeli 			if (flags & FOLL_WRITE)
618187ffc118SAndrea Arcangeli 				fault_flags |= FAULT_FLAG_WRITE;
6182a7f22660SDavid Hildenbrand 			else if (unshare)
6183a7f22660SDavid Hildenbrand 				fault_flags |= FAULT_FLAG_UNSHARE;
61844f6da934SPeter Xu 			if (locked)
618571335f37SPeter Xu 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
618671335f37SPeter Xu 					FAULT_FLAG_KILLABLE;
618787ffc118SAndrea Arcangeli 			if (flags & FOLL_NOWAIT)
618887ffc118SAndrea Arcangeli 				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
618987ffc118SAndrea Arcangeli 					FAULT_FLAG_RETRY_NOWAIT;
619087ffc118SAndrea Arcangeli 			if (flags & FOLL_TRIED) {
61914426e945SPeter Xu 				/*
61924426e945SPeter Xu 				 * Note: FAULT_FLAG_ALLOW_RETRY and
61934426e945SPeter Xu 				 * FAULT_FLAG_TRIED can co-exist
61944426e945SPeter Xu 				 */
619587ffc118SAndrea Arcangeli 				fault_flags |= FAULT_FLAG_TRIED;
619687ffc118SAndrea Arcangeli 			}
619787ffc118SAndrea Arcangeli 			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
619887ffc118SAndrea Arcangeli 			if (ret & VM_FAULT_ERROR) {
61992be7cfedSDaniel Jordan 				err = vm_fault_to_errno(ret, flags);
62001c59827dSHugh Dickins 				remainder = 0;
62011c59827dSHugh Dickins 				break;
62021c59827dSHugh Dickins 			}
620387ffc118SAndrea Arcangeli 			if (ret & VM_FAULT_RETRY) {
62044f6da934SPeter Xu 				if (locked &&
62051ac25013SAndrea Arcangeli 				    !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
62064f6da934SPeter Xu 					*locked = 0;
620787ffc118SAndrea Arcangeli 				*nr_pages = 0;
620887ffc118SAndrea Arcangeli 				/*
620987ffc118SAndrea Arcangeli 				 * VM_FAULT_RETRY must not return an
621087ffc118SAndrea Arcangeli 				 * error, it will return zero
621187ffc118SAndrea Arcangeli 				 * instead.
621287ffc118SAndrea Arcangeli 				 *
621387ffc118SAndrea Arcangeli 				 * No need to update "position" as the
621487ffc118SAndrea Arcangeli 				 * caller will not check it after
621587ffc118SAndrea Arcangeli 				 * *nr_pages is set to 0.
621687ffc118SAndrea Arcangeli 				 */
621787ffc118SAndrea Arcangeli 				return i;
621887ffc118SAndrea Arcangeli 			}
621987ffc118SAndrea Arcangeli 			continue;
622087ffc118SAndrea Arcangeli 		}
622163551ae0SDavid Gibson 
6222a5516438SAndi Kleen 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
62237f2e9525SGerald Schaefer 		page = pte_page(huge_ptep_get(pte));
62248fde12caSLinus Torvalds 
6225b6a2619cSDavid Hildenbrand 		VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
6226b6a2619cSDavid Hildenbrand 			       !PageAnonExclusive(page), page);
6227b6a2619cSDavid Hildenbrand 
62288fde12caSLinus Torvalds 		/*
6229acbfb087SZhigang Lu 		 * If subpage information is not requested, update counters
6230acbfb087SZhigang Lu 		 * and skip the same_page loop below.
6231acbfb087SZhigang Lu 		 */
6232acbfb087SZhigang Lu 		if (!pages && !vmas && !pfn_offset &&
6233acbfb087SZhigang Lu 		    (vaddr + huge_page_size(h) < vma->vm_end) &&
6234acbfb087SZhigang Lu 		    (remainder >= pages_per_huge_page(h))) {
6235acbfb087SZhigang Lu 			vaddr += huge_page_size(h);
6236acbfb087SZhigang Lu 			remainder -= pages_per_huge_page(h);
6237acbfb087SZhigang Lu 			i += pages_per_huge_page(h);
6238acbfb087SZhigang Lu 			spin_unlock(ptl);
6239acbfb087SZhigang Lu 			continue;
6240acbfb087SZhigang Lu 		}
6241acbfb087SZhigang Lu 
6242d08af0a5SJoao Martins 		/* vaddr may not be aligned to PAGE_SIZE */
6243d08af0a5SJoao Martins 		refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
6244d08af0a5SJoao Martins 		    (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
62450fa5bc40SJoao Martins 
624682e5d378SJoao Martins 		if (pages || vmas)
624714455eabSCheng Li 			record_subpages_vmas(nth_page(page, pfn_offset),
624882e5d378SJoao Martins 					     vma, refs,
624982e5d378SJoao Martins 					     likely(pages) ? pages + i : NULL,
625082e5d378SJoao Martins 					     vmas ? vmas + i : NULL);
625163551ae0SDavid Gibson 
625282e5d378SJoao Martins 		if (pages) {
62530fa5bc40SJoao Martins 			/*
6254822951d8SMatthew Wilcox (Oracle) 			 * try_grab_folio() should always succeed here,
62550fa5bc40SJoao Martins 			 * because: a) we hold the ptl lock, and b) we've just
62560fa5bc40SJoao Martins 			 * checked that the huge page is present in the page
62570fa5bc40SJoao Martins 			 * tables. If the huge page is present, then the tail
62580fa5bc40SJoao Martins 			 * pages must also be present. The ptl prevents the
62590fa5bc40SJoao Martins 			 * head page and tail pages from being rearranged in
62600fa5bc40SJoao Martins 			 * any way. So this page must be available at this
62610fa5bc40SJoao Martins 			 * point, unless the page refcount overflowed:
62620fa5bc40SJoao Martins 			 */
6263822951d8SMatthew Wilcox (Oracle) 			if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs,
62640fa5bc40SJoao Martins 							 flags))) {
62650fa5bc40SJoao Martins 				spin_unlock(ptl);
62660fa5bc40SJoao Martins 				remainder = 0;
62670fa5bc40SJoao Martins 				err = -ENOMEM;
62680fa5bc40SJoao Martins 				break;
62690fa5bc40SJoao Martins 			}
6270d5d4b0aaSChen, Kenneth W 		}
627182e5d378SJoao Martins 
627282e5d378SJoao Martins 		vaddr += (refs << PAGE_SHIFT);
627382e5d378SJoao Martins 		remainder -= refs;
627482e5d378SJoao Martins 		i += refs;
627582e5d378SJoao Martins 
6276cb900f41SKirill A. Shutemov 		spin_unlock(ptl);
627763551ae0SDavid Gibson 	}
627828a35716SMichel Lespinasse 	*nr_pages = remainder;
627987ffc118SAndrea Arcangeli 	/*
628087ffc118SAndrea Arcangeli 	 * setting position is actually required only if remainder is
628187ffc118SAndrea Arcangeli 	 * not zero, but it's faster not to add an "if (remainder)"
628287ffc118SAndrea Arcangeli 	 * branch.
628387ffc118SAndrea Arcangeli 	 */
628463551ae0SDavid Gibson 	*position = vaddr;
628563551ae0SDavid Gibson 
62862be7cfedSDaniel Jordan 	return i ? i : err;
628763551ae0SDavid Gibson }
62888f860591SZhang, Yanmin 
62897da4d641SPeter Zijlstra unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
62905a90d5a1SPeter Xu 		unsigned long address, unsigned long end,
62915a90d5a1SPeter Xu 		pgprot_t newprot, unsigned long cp_flags)
62928f860591SZhang, Yanmin {
62938f860591SZhang, Yanmin 	struct mm_struct *mm = vma->vm_mm;
62948f860591SZhang, Yanmin 	unsigned long start = address;
62958f860591SZhang, Yanmin 	pte_t *ptep;
62968f860591SZhang, Yanmin 	pte_t pte;
6297a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
629860dfaad6SPeter Xu 	unsigned long pages = 0, psize = huge_page_size(h);
6299dff11abeSMike Kravetz 	bool shared_pmd = false;
6300ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
6301e95a9851SMike Kravetz 	unsigned long last_addr_mask;
63025a90d5a1SPeter Xu 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
63035a90d5a1SPeter Xu 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6304dff11abeSMike Kravetz 
6305dff11abeSMike Kravetz 	/*
6306dff11abeSMike Kravetz 	 * In the case of shared PMDs, the area to flush could be beyond
6307ac46d4f3SJérôme Glisse 	 * start/end.  Set range.start/range.end to cover the maximum possible
6308dff11abeSMike Kravetz 	 * range if PMD sharing is possible.
6309dff11abeSMike Kravetz 	 */
63107269f999SJérôme Glisse 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
63117269f999SJérôme Glisse 				0, vma, mm, start, end);
6312ac46d4f3SJérôme Glisse 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
63138f860591SZhang, Yanmin 
63148f860591SZhang, Yanmin 	BUG_ON(address >= end);
6315ac46d4f3SJérôme Glisse 	flush_cache_range(vma, range.start, range.end);
63168f860591SZhang, Yanmin 
6317ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_start(&range);
6318e95a9851SMike Kravetz 	last_addr_mask = hugetlb_mask_last_page(h);
631983cde9e8SDavidlohr Bueso 	i_mmap_lock_write(vma->vm_file->f_mapping);
632060dfaad6SPeter Xu 	for (; address < end; address += psize) {
6321cb900f41SKirill A. Shutemov 		spinlock_t *ptl;
632260dfaad6SPeter Xu 		ptep = huge_pte_offset(mm, address, psize);
6323e95a9851SMike Kravetz 		if (!ptep) {
6324e95a9851SMike Kravetz 			address |= last_addr_mask;
63258f860591SZhang, Yanmin 			continue;
6326e95a9851SMike Kravetz 		}
6327cb900f41SKirill A. Shutemov 		ptl = huge_pte_lock(h, mm, ptep);
63284ddb4d91SMike Kravetz 		if (huge_pmd_unshare(mm, vma, address, ptep)) {
632960dfaad6SPeter Xu 			/*
633060dfaad6SPeter Xu 			 * When uffd-wp is enabled on the vma, unshare
633160dfaad6SPeter Xu 			 * shouldn't happen at all.  Warn about it if it
633260dfaad6SPeter Xu 			 * shouldn't happen at all.  Warn if it does
633360dfaad6SPeter Xu 			 * happen for any reason.
633460dfaad6SPeter Xu 			WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
63357da4d641SPeter Zijlstra 			pages++;
6336cb900f41SKirill A. Shutemov 			spin_unlock(ptl);
6337dff11abeSMike Kravetz 			shared_pmd = true;
63384ddb4d91SMike Kravetz 			address |= last_addr_mask;
633939dde65cSChen, Kenneth W 			continue;
63407da4d641SPeter Zijlstra 		}
6341a8bda28dSNaoya Horiguchi 		pte = huge_ptep_get(ptep);
6342a8bda28dSNaoya Horiguchi 		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
6343a8bda28dSNaoya Horiguchi 			spin_unlock(ptl);
6344a8bda28dSNaoya Horiguchi 			continue;
6345a8bda28dSNaoya Horiguchi 		}
6346a8bda28dSNaoya Horiguchi 		if (unlikely(is_hugetlb_entry_migration(pte))) {
6347a8bda28dSNaoya Horiguchi 			swp_entry_t entry = pte_to_swp_entry(pte);
63486c287605SDavid Hildenbrand 			struct page *page = pfn_swap_entry_to_page(entry);
6349a8bda28dSNaoya Horiguchi 
63506c287605SDavid Hildenbrand 			if (!is_readable_migration_entry(entry)) {
6351a8bda28dSNaoya Horiguchi 				pte_t newpte;
6352a8bda28dSNaoya Horiguchi 
63536c287605SDavid Hildenbrand 				if (PageAnon(page))
63546c287605SDavid Hildenbrand 					entry = make_readable_exclusive_migration_entry(
63556c287605SDavid Hildenbrand 								swp_offset(entry));
63566c287605SDavid Hildenbrand 				else
63574dd845b5SAlistair Popple 					entry = make_readable_migration_entry(
63584dd845b5SAlistair Popple 								swp_offset(entry));
6359a8bda28dSNaoya Horiguchi 				newpte = swp_entry_to_pte(entry);
63605a90d5a1SPeter Xu 				if (uffd_wp)
63615a90d5a1SPeter Xu 					newpte = pte_swp_mkuffd_wp(newpte);
63625a90d5a1SPeter Xu 				else if (uffd_wp_resolve)
63635a90d5a1SPeter Xu 					newpte = pte_swp_clear_uffd_wp(newpte);
636418f39629SQi Zheng 				set_huge_pte_at(mm, address, ptep, newpte);
6365a8bda28dSNaoya Horiguchi 				pages++;
6366a8bda28dSNaoya Horiguchi 			}
6367a8bda28dSNaoya Horiguchi 			spin_unlock(ptl);
6368a8bda28dSNaoya Horiguchi 			continue;
6369a8bda28dSNaoya Horiguchi 		}
637060dfaad6SPeter Xu 		if (unlikely(pte_marker_uffd_wp(pte))) {
637160dfaad6SPeter Xu 			/*
637260dfaad6SPeter Xu 			 * This is changing a non-present pte into a none pte,
637360dfaad6SPeter Xu 			 * so there is no need for huge_ptep_modify_prot_start/commit().
637460dfaad6SPeter Xu 			 */
637560dfaad6SPeter Xu 			if (uffd_wp_resolve)
637660dfaad6SPeter Xu 				huge_pte_clear(mm, address, ptep, psize);
637760dfaad6SPeter Xu 		}
6378a8bda28dSNaoya Horiguchi 		if (!huge_pte_none(pte)) {
6379023bdd00SAneesh Kumar K.V 			pte_t old_pte;
638079c1c594SChristophe Leroy 			unsigned int shift = huge_page_shift(hstate_vma(vma));
6381023bdd00SAneesh Kumar K.V 
6382023bdd00SAneesh Kumar K.V 			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
638316785bd7SAnshuman Khandual 			pte = huge_pte_modify(old_pte, newprot);
638479c1c594SChristophe Leroy 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
63855a90d5a1SPeter Xu 			if (uffd_wp)
63865a90d5a1SPeter Xu 				pte = huge_pte_mkuffd_wp(huge_pte_wrprotect(pte));
63875a90d5a1SPeter Xu 			else if (uffd_wp_resolve)
63885a90d5a1SPeter Xu 				pte = huge_pte_clear_uffd_wp(pte);
6389023bdd00SAneesh Kumar K.V 			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
63907da4d641SPeter Zijlstra 			pages++;
639160dfaad6SPeter Xu 		} else {
639260dfaad6SPeter Xu 			/* None pte */
639360dfaad6SPeter Xu 			if (unlikely(uffd_wp))
639460dfaad6SPeter Xu 				/* Safe to modify directly (none->non-present). */
639560dfaad6SPeter Xu 				set_huge_pte_at(mm, address, ptep,
639660dfaad6SPeter Xu 						make_pte_marker(PTE_MARKER_UFFD_WP));
63978f860591SZhang, Yanmin 		}
6398cb900f41SKirill A. Shutemov 		spin_unlock(ptl);
63998f860591SZhang, Yanmin 	}
6400d833352aSMel Gorman 	/*
6401c8c06efaSDavidlohr Bueso 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
6402d833352aSMel Gorman 	 * may have cleared our pud entry and done put_page on the page table:
6403c8c06efaSDavidlohr Bueso 	 * once we release i_mmap_rwsem, another task can do the final put_page
6404dff11abeSMike Kravetz 	 * and that page table can be reused and filled with junk.  If we actually
6405dff11abeSMike Kravetz 	 * did unshare a page of pmds, flush the range corresponding to the pud.
6406d833352aSMel Gorman 	 */
6407dff11abeSMike Kravetz 	if (shared_pmd)
6408ac46d4f3SJérôme Glisse 		flush_hugetlb_tlb_range(vma, range.start, range.end);
6409dff11abeSMike Kravetz 	else
64105491ae7bSAneesh Kumar K.V 		flush_hugetlb_tlb_range(vma, start, end);
64110f10851eSJérôme Glisse 	/*
64120f10851eSJérôme Glisse 	 * No need to call mmu_notifier_invalidate_range(): we are downgrading
64130f10851eSJérôme Glisse 	 * page table protection, not changing it to point to a new page.
64140f10851eSJérôme Glisse 	 *
6415ee65728eSMike Rapoport 	 * See Documentation/mm/mmu_notifier.rst
64160f10851eSJérôme Glisse 	 */
641783cde9e8SDavidlohr Bueso 	i_mmap_unlock_write(vma->vm_file->f_mapping);
6418ac46d4f3SJérôme Glisse 	mmu_notifier_invalidate_range_end(&range);
64197da4d641SPeter Zijlstra 
64207da4d641SPeter Zijlstra 	return pages << h->order;
64218f860591SZhang, Yanmin }
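
/*
 * Illustrative sketch, not part of the original source: a minimal
 * userspace program whose mprotect() call ends up in
 * hugetlb_change_protection() above; the function's return value counts
 * base pages (pages << h->order).  Assumes a Linux system with a 2 MiB
 * default huge page size and at least one free page in the hugetlb pool
 * (e.g. vm.nr_hugepages=1); kept under "#if 0" so it is never built as
 * part of this file.
 */
#if 0
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 2UL << 20;			/* one 2 MiB huge page */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	p[0] = 1;				/* fault the huge page in */
	if (mprotect(p, len, PROT_READ))	/* write-protect the range */
		perror("mprotect");
	munmap(p, len);
	return 0;
}
#endif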
64228f860591SZhang, Yanmin 
642333b8f84aSMike Kravetz /* Return true if reservation was successful, false otherwise.  */
642433b8f84aSMike Kravetz bool hugetlb_reserve_pages(struct inode *inode,
6425a1e78772SMel Gorman 					long from, long to,
64265a6fe125SMel Gorman 					struct vm_area_struct *vma,
6427ca16d140SKOSAKI Motohiro 					vm_flags_t vm_flags)
6428e4e574b7SAdam Litke {
642933b8f84aSMike Kravetz 	long chg, add = -1;
6430a5516438SAndi Kleen 	struct hstate *h = hstate_inode(inode);
643190481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_inode(inode);
64329119a41eSJoonsoo Kim 	struct resv_map *resv_map;
6433075a61d0SMina Almasry 	struct hugetlb_cgroup *h_cg = NULL;
64340db9d74eSMina Almasry 	long gbl_reserve, regions_needed = 0;
6435e4e574b7SAdam Litke 
643663489f8eSMike Kravetz 	/* This should never happen */
643763489f8eSMike Kravetz 	if (from > to) {
643863489f8eSMike Kravetz 		VM_WARN(1, "%s called with a negative range\n", __func__);
643933b8f84aSMike Kravetz 		return false;
644063489f8eSMike Kravetz 	}
644163489f8eSMike Kravetz 
6442a1e78772SMel Gorman 	/*
644317c9d12eSMel Gorman 	 * Only apply hugepage reservation if asked. At fault time, an
644417c9d12eSMel Gorman 	 * attempt will be made for VM_NORESERVE to allocate a page
644590481622SDavid Gibson 	 * without using reserves
644617c9d12eSMel Gorman 	 */
6447ca16d140SKOSAKI Motohiro 	if (vm_flags & VM_NORESERVE)
644833b8f84aSMike Kravetz 		return true;
644917c9d12eSMel Gorman 
645017c9d12eSMel Gorman 	/*
6451a1e78772SMel Gorman 	 * Shared mappings base their reservation on the number of pages that
6452a1e78772SMel Gorman 	 * are already allocated on behalf of the file. Private mappings need
6453a1e78772SMel Gorman 	 * to reserve the full area even if read-only as mprotect() may be
6454a1e78772SMel Gorman 	 * called to make the mapping read-write. Assume !vma is a shm mapping
6455a1e78772SMel Gorman 	 */
64569119a41eSJoonsoo Kim 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
6457f27a5136SMike Kravetz 		/*
6458f27a5136SMike Kravetz 		 * resv_map can not be NULL as hugetlb_reserve_pages is only
6459f27a5136SMike Kravetz 		 * called for inodes for which resv_maps were created (see
6460f27a5136SMike Kravetz 		 * hugetlbfs_get_inode).
6461f27a5136SMike Kravetz 		 */
64624e35f483SJoonsoo Kim 		resv_map = inode_resv_map(inode);
64639119a41eSJoonsoo Kim 
64640db9d74eSMina Almasry 		chg = region_chg(resv_map, from, to, &regions_needed);
64659119a41eSJoonsoo Kim 
64669119a41eSJoonsoo Kim 	} else {
6467e9fe92aeSMina Almasry 		/* Private mapping. */
64689119a41eSJoonsoo Kim 		resv_map = resv_map_alloc();
64695a6fe125SMel Gorman 		if (!resv_map)
647033b8f84aSMike Kravetz 			return false;
64715a6fe125SMel Gorman 
647217c9d12eSMel Gorman 		chg = to - from;
647317c9d12eSMel Gorman 
64745a6fe125SMel Gorman 		set_vma_resv_map(vma, resv_map);
64755a6fe125SMel Gorman 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
64765a6fe125SMel Gorman 	}
64775a6fe125SMel Gorman 
647833b8f84aSMike Kravetz 	if (chg < 0)
6479c50ac050SDave Hansen 		goto out_err;
648017c9d12eSMel Gorman 
648133b8f84aSMike Kravetz 	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
648233b8f84aSMike Kravetz 				chg * pages_per_huge_page(h), &h_cg) < 0)
6483075a61d0SMina Almasry 		goto out_err;
6484075a61d0SMina Almasry 
6485075a61d0SMina Almasry 	if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
6486075a61d0SMina Almasry 		/* For private mappings, the hugetlb_cgroup uncharge info hangs
6487075a61d0SMina Almasry 		 * off the resv_map.
6488075a61d0SMina Almasry 		 */
6489075a61d0SMina Almasry 		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6490075a61d0SMina Almasry 	}
6491075a61d0SMina Almasry 
64921c5ecae3SMike Kravetz 	/*
64931c5ecae3SMike Kravetz 	 * There must be enough pages in the subpool for the mapping. If
64941c5ecae3SMike Kravetz 	 * the subpool has a minimum size, there may be some global
64951c5ecae3SMike Kravetz 	 * reservations already in place (gbl_reserve).
64961c5ecae3SMike Kravetz 	 */
64971c5ecae3SMike Kravetz 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
649833b8f84aSMike Kravetz 	if (gbl_reserve < 0)
6499075a61d0SMina Almasry 		goto out_uncharge_cgroup;
650017c9d12eSMel Gorman 
650117c9d12eSMel Gorman 	/*
650217c9d12eSMel Gorman 	 * Check that enough hugepages are available for the reservation.
650390481622SDavid Gibson 	 * Hand the pages back to the subpool if there are not enough.
650417c9d12eSMel Gorman 	 */
650533b8f84aSMike Kravetz 	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
6506075a61d0SMina Almasry 		goto out_put_pages;
650717c9d12eSMel Gorman 
650817c9d12eSMel Gorman 	/*
650917c9d12eSMel Gorman 	 * Account for the reservations made. Shared mappings record regions
651017c9d12eSMel Gorman 	 * that have reservations as they are shared by multiple VMAs.
651117c9d12eSMel Gorman 	 * When the last VMA disappears, the region map says how much
651217c9d12eSMel Gorman 	 * the reservation was and the page cache tells how much of
651317c9d12eSMel Gorman 	 * the reservation was consumed. Private mappings are per-VMA and
651417c9d12eSMel Gorman 	 * only the consumed reservations are tracked. When the VMA
651517c9d12eSMel Gorman 	 * disappears, the original reservation is the VMA size and the
651617c9d12eSMel Gorman 	 * consumed reservations are stored in the map. Hence, nothing
651717c9d12eSMel Gorman 	 * else has to be done for private mappings here
651817c9d12eSMel Gorman 	 */
651933039678SMike Kravetz 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
6520075a61d0SMina Almasry 		add = region_add(resv_map, from, to, regions_needed, h, h_cg);
652133039678SMike Kravetz 
65220db9d74eSMina Almasry 		if (unlikely(add < 0)) {
65230db9d74eSMina Almasry 			hugetlb_acct_memory(h, -gbl_reserve);
6524075a61d0SMina Almasry 			goto out_put_pages;
65250db9d74eSMina Almasry 		} else if (unlikely(chg > add)) {
652633039678SMike Kravetz 			/*
652733039678SMike Kravetz 			 * pages in this range were added to the reserve
652833039678SMike Kravetz 			 * map between region_chg and region_add.  This
652933039678SMike Kravetz 			 * indicates a race with alloc_huge_page.  Adjust
653033039678SMike Kravetz 			 * the subpool and reserve counts modified above
653133039678SMike Kravetz 			 * based on the difference.
653233039678SMike Kravetz 			 */
653333039678SMike Kravetz 			long rsv_adjust;
653433039678SMike Kravetz 
6535d85aecf2SMiaohe Lin 			/*
6536d85aecf2SMiaohe Lin 			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
6537d85aecf2SMiaohe Lin 			 * reference to h_cg->css. See comment below for detail.
6538d85aecf2SMiaohe Lin 			 */
6539075a61d0SMina Almasry 			hugetlb_cgroup_uncharge_cgroup_rsvd(
6540075a61d0SMina Almasry 				hstate_index(h),
6541075a61d0SMina Almasry 				(chg - add) * pages_per_huge_page(h), h_cg);
6542075a61d0SMina Almasry 
654333039678SMike Kravetz 			rsv_adjust = hugepage_subpool_put_pages(spool,
654433039678SMike Kravetz 								chg - add);
654533039678SMike Kravetz 			hugetlb_acct_memory(h, -rsv_adjust);
6546d85aecf2SMiaohe Lin 		} else if (h_cg) {
6547d85aecf2SMiaohe Lin 			/*
6548d85aecf2SMiaohe Lin 			 * The file_regions will hold their own reference to
6549d85aecf2SMiaohe Lin 			 * h_cg->css. So we should release the reference held
6550d85aecf2SMiaohe Lin 			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
6551d85aecf2SMiaohe Lin 			 * done.
6552d85aecf2SMiaohe Lin 			 */
6553d85aecf2SMiaohe Lin 			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
655433039678SMike Kravetz 		}
655533039678SMike Kravetz 	}
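	/*
	 * Illustrative worked example, not part of the original source (the
	 * numbers are hypothetical): region_chg() above may report chg = 10
	 * for a shared mapping, so 10 pages are charged to the subpool and
	 * cgroup up front.  If a racing alloc_huge_page() adds 3 pages of
	 * this range to the reserve map before region_add() runs, add comes
	 * back as 7 and the block above returns rsv_adjust = chg - add = 3
	 * pages to the subpool and uncharges the same amount from the cgroup.
	 */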
655633b8f84aSMike Kravetz 	return true;
655733b8f84aSMike Kravetz 
6558075a61d0SMina Almasry out_put_pages:
6559075a61d0SMina Almasry 	/* put back original number of pages, chg */
6560075a61d0SMina Almasry 	(void)hugepage_subpool_put_pages(spool, chg);
6561075a61d0SMina Almasry out_uncharge_cgroup:
6562075a61d0SMina Almasry 	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
6563075a61d0SMina Almasry 					    chg * pages_per_huge_page(h), h_cg);
6564c50ac050SDave Hansen out_err:
65655e911373SMike Kravetz 	if (!vma || vma->vm_flags & VM_MAYSHARE)
65660db9d74eSMina Almasry 		/* Only call region_abort if the region_chg succeeded but the
65670db9d74eSMina Almasry 		 * region_add failed or didn't run.
65680db9d74eSMina Almasry 		 */
65690db9d74eSMina Almasry 		if (chg >= 0 && add < 0)
65700db9d74eSMina Almasry 			region_abort(resv_map, from, to, regions_needed);
6571f031dd27SJoonsoo Kim 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
6572f031dd27SJoonsoo Kim 		kref_put(&resv_map->refs, resv_map_release);
657333b8f84aSMike Kravetz 	return false;
6574a43a8c39SChen, Kenneth W }
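
/*
 * Illustrative sketch, not part of the original source: the reservation
 * above is made at mmap() time, and MAP_NORESERVE mappings skip it
 * entirely via the VM_NORESERVE early return.  Assumes 2 MiB huge pages;
 * kept under "#if 0" so it is never built as part of this file.
 */
#if 0
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 4UL << 21;	/* four 2 MiB huge pages */

	/* Reserves 4 huge pages up front (fails if the pool is too small). */
	void *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	/* No reservation: allocation is attempted only at fault time. */
	void *b = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_NORESERVE,
		       -1, 0);

	printf("reserved map: %p, noreserve map: %p\n", a, b);
	return 0;
}
#endif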
6575a43a8c39SChen, Kenneth W 
6576b5cec28dSMike Kravetz long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
6577b5cec28dSMike Kravetz 								long freed)
6578a43a8c39SChen, Kenneth W {
6579a5516438SAndi Kleen 	struct hstate *h = hstate_inode(inode);
65804e35f483SJoonsoo Kim 	struct resv_map *resv_map = inode_resv_map(inode);
65819119a41eSJoonsoo Kim 	long chg = 0;
658290481622SDavid Gibson 	struct hugepage_subpool *spool = subpool_inode(inode);
65831c5ecae3SMike Kravetz 	long gbl_reserve;
658445c682a6SKen Chen 
6585f27a5136SMike Kravetz 	/*
6586f27a5136SMike Kravetz 	 * Since this routine can be called in the evict inode path for all
6587f27a5136SMike Kravetz 	 * hugetlbfs inodes, resv_map could be NULL.
6588f27a5136SMike Kravetz 	 */
6589b5cec28dSMike Kravetz 	if (resv_map) {
6590b5cec28dSMike Kravetz 		chg = region_del(resv_map, start, end);
6591b5cec28dSMike Kravetz 		/*
6592b5cec28dSMike Kravetz 		 * region_del() can fail in the rare case where a region
6593b5cec28dSMike Kravetz 		 * must be split and another region descriptor can not be
6594b5cec28dSMike Kravetz 		 * allocated.  If end == LONG_MAX, it will not fail.
6595b5cec28dSMike Kravetz 		 */
6596b5cec28dSMike Kravetz 		if (chg < 0)
6597b5cec28dSMike Kravetz 			return chg;
6598b5cec28dSMike Kravetz 	}
6599b5cec28dSMike Kravetz 
660045c682a6SKen Chen 	spin_lock(&inode->i_lock);
6601e4c6f8beSEric Sandeen 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
660245c682a6SKen Chen 	spin_unlock(&inode->i_lock);
660345c682a6SKen Chen 
66041c5ecae3SMike Kravetz 	/*
66051c5ecae3SMike Kravetz 	 * If the subpool has a minimum size, the number of global
66061c5ecae3SMike Kravetz 	 * reservations to be released may be adjusted.
6607dddf31a4SMiaohe Lin 	 *
6608dddf31a4SMiaohe Lin 	 * Note that !resv_map implies freed == 0. So (chg - freed)
6609dddf31a4SMiaohe Lin 	 * won't go negative.
66101c5ecae3SMike Kravetz 	 */
66111c5ecae3SMike Kravetz 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
66121c5ecae3SMike Kravetz 	hugetlb_acct_memory(h, -gbl_reserve);
6613b5cec28dSMike Kravetz 
6614b5cec28dSMike Kravetz 	return 0;
6615a43a8c39SChen, Kenneth W }
661693f70f90SNaoya Horiguchi 
66173212b535SSteve Capper #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
66183212b535SSteve Capper static unsigned long page_table_shareable(struct vm_area_struct *svma,
66193212b535SSteve Capper 				struct vm_area_struct *vma,
66203212b535SSteve Capper 				unsigned long addr, pgoff_t idx)
66213212b535SSteve Capper {
66223212b535SSteve Capper 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
66233212b535SSteve Capper 				svma->vm_start;
66243212b535SSteve Capper 	unsigned long sbase = saddr & PUD_MASK;
66253212b535SSteve Capper 	unsigned long s_end = sbase + PUD_SIZE;
66263212b535SSteve Capper 
66273212b535SSteve Capper 	/* Allow segments to share even if only one is marked locked */
6628de60f5f1SEric B Munson 	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
6629de60f5f1SEric B Munson 	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
66303212b535SSteve Capper 
66313212b535SSteve Capper 	/*
66323212b535SSteve Capper 	 * match the virtual addresses, permission and the alignment of the
66333212b535SSteve Capper 	 * page table page.
66343212b535SSteve Capper 	 */
66353212b535SSteve Capper 	if (pmd_index(addr) != pmd_index(saddr) ||
66363212b535SSteve Capper 	    vm_flags != svm_flags ||
663707e51edfSMiaohe Lin 	    !range_in_vma(svma, sbase, s_end))
66383212b535SSteve Capper 		return 0;
66393212b535SSteve Capper 
66403212b535SSteve Capper 	return saddr;
66413212b535SSteve Capper }
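
/*
 * Illustrative worked example, not part of the original source (the
 * addresses are hypothetical): two tasks map the same hugetlbfs file at
 * pgoff 0, one vma at 0x7f4ac0000000 and the candidate svma at
 * 0x7f6e80000000, both PUD (1 GiB) aligned.  For addr = 0x7f4ac0200000
 * the caller computes idx = 512 base pages, so the formula above yields
 * saddr = (512 << PAGE_SHIFT) + 0x7f6e80000000 = 0x7f6e80200000, i.e. the
 * same file offset seen through svma.  Both addresses sit in the same
 * slot of their PUD-sized regions, so the PMD page may be shared.
 */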
66423212b535SSteve Capper 
664331aafb45SNicholas Krause static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
66443212b535SSteve Capper {
66453212b535SSteve Capper 	unsigned long base = addr & PUD_MASK;
66463212b535SSteve Capper 	unsigned long end = base + PUD_SIZE;
66473212b535SSteve Capper 
66483212b535SSteve Capper 	/*
66493212b535SSteve Capper 	 * check on proper vm_flags and page table alignment
66503212b535SSteve Capper 	 */
6651017b1660SMike Kravetz 	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
665231aafb45SNicholas Krause 		return true;
665331aafb45SNicholas Krause 	return false;
66543212b535SSteve Capper }
66553212b535SSteve Capper 
6656c1991e07SPeter Xu bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6657c1991e07SPeter Xu {
6658c1991e07SPeter Xu #ifdef CONFIG_USERFAULTFD
6659c1991e07SPeter Xu 	if (uffd_disable_huge_pmd_share(vma))
6660c1991e07SPeter Xu 		return false;
6661c1991e07SPeter Xu #endif
6662c1991e07SPeter Xu 	return vma_shareable(vma, addr);
6663c1991e07SPeter Xu }
6664c1991e07SPeter Xu 
66653212b535SSteve Capper /*
6666017b1660SMike Kravetz  * Determine if start,end range within vma could be mapped by shared pmd.
6667017b1660SMike Kravetz  * If yes, adjust start and end to cover range associated with possible
6668017b1660SMike Kravetz  * shared pmd mappings.
6669017b1660SMike Kravetz  */
6670017b1660SMike Kravetz void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6671017b1660SMike Kravetz 				unsigned long *start, unsigned long *end)
6672017b1660SMike Kravetz {
6673a1ba9da8SLi Xinhai 	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
6674a1ba9da8SLi Xinhai 		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6675017b1660SMike Kravetz 
6676a1ba9da8SLi Xinhai 	/*
6677f0953a1bSIngo Molnar 	 * vma needs to span at least one aligned PUD size, and the range
6677f0953a1bSIngo Molnar 	 * must be at least partially within it.
6679a1ba9da8SLi Xinhai 	 */
6680a1ba9da8SLi Xinhai 	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
6681a1ba9da8SLi Xinhai 		(*end <= v_start) || (*start >= v_end))
6682017b1660SMike Kravetz 		return;
6683017b1660SMike Kravetz 
668475802ca6SPeter Xu 	/* Extend the range to be PUD aligned for the worst case scenario */
6685a1ba9da8SLi Xinhai 	if (*start > v_start)
6686a1ba9da8SLi Xinhai 		*start = ALIGN_DOWN(*start, PUD_SIZE);
6687017b1660SMike Kravetz 
6688a1ba9da8SLi Xinhai 	if (*end < v_end)
6689a1ba9da8SLi Xinhai 		*end = ALIGN(*end, PUD_SIZE);
6690017b1660SMike Kravetz }
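
/*
 * Illustrative worked example, not part of the original source (the
 * addresses are hypothetical): with a 1 GiB PUD_SIZE and a VM_MAYSHARE
 * vma covering [0x40000000, 0xc0000000), a caller flushing
 * [0x44000000, 0x46000000) has the range widened to
 * [0x40000000, 0x80000000), because huge_pmd_unshare() anywhere in that
 * window can drop a whole PUD's worth of mappings at once.
 */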
6691017b1660SMike Kravetz 
6692017b1660SMike Kravetz /*
66933212b535SSteve Capper  * Search for a shareable pmd page for hugetlb. In any case, it calls pmd_alloc()
66943212b535SSteve Capper  * and returns the corresponding pte. While this is not necessary for the
66953212b535SSteve Capper  * !shared pmd case because we can allocate the pmd later as well, it makes the
6696*3a47c54fSMike Kravetz  * code much cleaner. pmd allocation is essential for the shared case because
6697*3a47c54fSMike Kravetz  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
6698*3a47c54fSMike Kravetz  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
6699*3a47c54fSMike Kravetz  * bad pmd for sharing.
67003212b535SSteve Capper  */
6701aec44e0fSPeter Xu pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6702aec44e0fSPeter Xu 		      unsigned long addr, pud_t *pud)
67033212b535SSteve Capper {
67043212b535SSteve Capper 	struct address_space *mapping = vma->vm_file->f_mapping;
67053212b535SSteve Capper 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
67063212b535SSteve Capper 			vma->vm_pgoff;
67073212b535SSteve Capper 	struct vm_area_struct *svma;
67083212b535SSteve Capper 	unsigned long saddr;
67093212b535SSteve Capper 	pte_t *spte = NULL;
67103212b535SSteve Capper 	pte_t *pte;
6711cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
67123212b535SSteve Capper 
6713*3a47c54fSMike Kravetz 	i_mmap_lock_read(mapping);
67143212b535SSteve Capper 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
67153212b535SSteve Capper 		if (svma == vma)
67163212b535SSteve Capper 			continue;
67173212b535SSteve Capper 
67183212b535SSteve Capper 		saddr = page_table_shareable(svma, vma, addr, idx);
67193212b535SSteve Capper 		if (saddr) {
67207868a208SPunit Agrawal 			spte = huge_pte_offset(svma->vm_mm, saddr,
67217868a208SPunit Agrawal 					       vma_mmu_pagesize(svma));
67223212b535SSteve Capper 			if (spte) {
67233212b535SSteve Capper 				get_page(virt_to_page(spte));
67243212b535SSteve Capper 				break;
67253212b535SSteve Capper 			}
67263212b535SSteve Capper 		}
67273212b535SSteve Capper 	}
67283212b535SSteve Capper 
67293212b535SSteve Capper 	if (!spte)
67303212b535SSteve Capper 		goto out;
67313212b535SSteve Capper 
67328bea8052SAneesh Kumar K.V 	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
6733dc6c9a35SKirill A. Shutemov 	if (pud_none(*pud)) {
67343212b535SSteve Capper 		pud_populate(mm, pud,
67353212b535SSteve Capper 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
6736c17b1f42SKirill A. Shutemov 		mm_inc_nr_pmds(mm);
6737dc6c9a35SKirill A. Shutemov 	} else {
67383212b535SSteve Capper 		put_page(virt_to_page(spte));
6739dc6c9a35SKirill A. Shutemov 	}
6740cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
67413212b535SSteve Capper out:
67423212b535SSteve Capper 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
6743*3a47c54fSMike Kravetz 	i_mmap_unlock_read(mapping);
67443212b535SSteve Capper 	return pte;
67453212b535SSteve Capper }
67463212b535SSteve Capper 
67473212b535SSteve Capper /*
67483212b535SSteve Capper  * unmap huge page backed by shared pte.
67493212b535SSteve Capper  *
67503212b535SSteve Capper  * Hugetlb pte page is ref counted at the time of mapping.  If the pte is
67513212b535SSteve Capper  * shared, as indicated by page_count > 1, unmap is achieved by clearing pud and
67523212b535SSteve Capper  * decrementing the ref count. If count == 1, the pte page is not shared.
67533212b535SSteve Capper  *
6754*3a47c54fSMike Kravetz  * Called with page table lock held.
67553212b535SSteve Capper  *
67563212b535SSteve Capper  * returns: 1 successfully unmapped a shared pte page
67573212b535SSteve Capper  *	    0 the underlying pte page is not shared, or it is the last user
67583212b535SSteve Capper  */
675934ae204fSMike Kravetz int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
67604ddb4d91SMike Kravetz 					unsigned long addr, pte_t *ptep)
67613212b535SSteve Capper {
67624ddb4d91SMike Kravetz 	pgd_t *pgd = pgd_offset(mm, addr);
67634ddb4d91SMike Kravetz 	p4d_t *p4d = p4d_offset(pgd, addr);
67644ddb4d91SMike Kravetz 	pud_t *pud = pud_offset(p4d, addr);
67653212b535SSteve Capper 
676634ae204fSMike Kravetz 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
67673212b535SSteve Capper 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
67683212b535SSteve Capper 	if (page_count(virt_to_page(ptep)) == 1)
67693212b535SSteve Capper 		return 0;
67703212b535SSteve Capper 
67713212b535SSteve Capper 	pud_clear(pud);
67723212b535SSteve Capper 	put_page(virt_to_page(ptep));
6773dc6c9a35SKirill A. Shutemov 	mm_dec_nr_pmds(mm);
67743212b535SSteve Capper 	return 1;
67753212b535SSteve Capper }
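
/*
 * Illustrative worked example, not part of the original source: if three
 * mappings share one PMD page via huge_pmd_share() above, page_count() of
 * that PMD page is 3.  The first two huge_pmd_unshare() calls each clear
 * the caller's PUD entry, drop the count (to 2, then 1) and return 1; the
 * third call sees page_count() == 1 and returns 0, leaving the PMD page to
 * be freed through the normal page table teardown path.
 */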
6776c1991e07SPeter Xu 
67779e5fc74cSSteve Capper #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
6778aec44e0fSPeter Xu pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6779aec44e0fSPeter Xu 		      unsigned long addr, pud_t *pud)
67809e5fc74cSSteve Capper {
67819e5fc74cSSteve Capper 	return NULL;
67829e5fc74cSSteve Capper }
6783e81f2d22SZhang Zhen 
678434ae204fSMike Kravetz int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
67854ddb4d91SMike Kravetz 				unsigned long addr, pte_t *ptep)
6786e81f2d22SZhang Zhen {
6787e81f2d22SZhang Zhen 	return 0;
6788e81f2d22SZhang Zhen }
6789017b1660SMike Kravetz 
6790017b1660SMike Kravetz void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6791017b1660SMike Kravetz 				unsigned long *start, unsigned long *end)
6792017b1660SMike Kravetz {
6793017b1660SMike Kravetz }
6794c1991e07SPeter Xu 
6795c1991e07SPeter Xu bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6796c1991e07SPeter Xu {
6797c1991e07SPeter Xu 	return false;
6798c1991e07SPeter Xu }
67993212b535SSteve Capper #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
68003212b535SSteve Capper 
68019e5fc74cSSteve Capper #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
6802aec44e0fSPeter Xu pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
68039e5fc74cSSteve Capper 			unsigned long addr, unsigned long sz)
68049e5fc74cSSteve Capper {
68059e5fc74cSSteve Capper 	pgd_t *pgd;
6806c2febafcSKirill A. Shutemov 	p4d_t *p4d;
68079e5fc74cSSteve Capper 	pud_t *pud;
68089e5fc74cSSteve Capper 	pte_t *pte = NULL;
68099e5fc74cSSteve Capper 
68109e5fc74cSSteve Capper 	pgd = pgd_offset(mm, addr);
6811f4f0a3d8SKirill A. Shutemov 	p4d = p4d_alloc(mm, pgd, addr);
6812f4f0a3d8SKirill A. Shutemov 	if (!p4d)
6813f4f0a3d8SKirill A. Shutemov 		return NULL;
6814c2febafcSKirill A. Shutemov 	pud = pud_alloc(mm, p4d, addr);
68159e5fc74cSSteve Capper 	if (pud) {
68169e5fc74cSSteve Capper 		if (sz == PUD_SIZE) {
68179e5fc74cSSteve Capper 			pte = (pte_t *)pud;
68189e5fc74cSSteve Capper 		} else {
68199e5fc74cSSteve Capper 			BUG_ON(sz != PMD_SIZE);
6820c1991e07SPeter Xu 			if (want_pmd_share(vma, addr) && pud_none(*pud))
6821aec44e0fSPeter Xu 				pte = huge_pmd_share(mm, vma, addr, pud);
68229e5fc74cSSteve Capper 			else
68239e5fc74cSSteve Capper 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
68249e5fc74cSSteve Capper 		}
68259e5fc74cSSteve Capper 	}
68264e666314SMichal Hocko 	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
68279e5fc74cSSteve Capper 
68289e5fc74cSSteve Capper 	return pte;
68299e5fc74cSSteve Capper }
68309e5fc74cSSteve Capper 
68319b19df29SPunit Agrawal /*
68329b19df29SPunit Agrawal  * huge_pte_offset() - Walk the page table to resolve the hugepage
68339b19df29SPunit Agrawal  * entry at address @addr
68349b19df29SPunit Agrawal  *
68358ac0b81aSLi Xinhai  * Return: Pointer to page table entry (PUD or PMD) for
68368ac0b81aSLi Xinhai  * address @addr, or NULL if a !p*d_present() entry is encountered and the
68379b19df29SPunit Agrawal  * size @sz doesn't match the hugepage size at this level of the page
68389b19df29SPunit Agrawal  * table.
68399b19df29SPunit Agrawal  */
68407868a208SPunit Agrawal pte_t *huge_pte_offset(struct mm_struct *mm,
68417868a208SPunit Agrawal 		       unsigned long addr, unsigned long sz)
68429e5fc74cSSteve Capper {
68439e5fc74cSSteve Capper 	pgd_t *pgd;
6844c2febafcSKirill A. Shutemov 	p4d_t *p4d;
68458ac0b81aSLi Xinhai 	pud_t *pud;
68468ac0b81aSLi Xinhai 	pmd_t *pmd;
68479e5fc74cSSteve Capper 
68489e5fc74cSSteve Capper 	pgd = pgd_offset(mm, addr);
6849c2febafcSKirill A. Shutemov 	if (!pgd_present(*pgd))
6850c2febafcSKirill A. Shutemov 		return NULL;
6851c2febafcSKirill A. Shutemov 	p4d = p4d_offset(pgd, addr);
6852c2febafcSKirill A. Shutemov 	if (!p4d_present(*p4d))
6853c2febafcSKirill A. Shutemov 		return NULL;
68549b19df29SPunit Agrawal 
6855c2febafcSKirill A. Shutemov 	pud = pud_offset(p4d, addr);
68568ac0b81aSLi Xinhai 	if (sz == PUD_SIZE)
68578ac0b81aSLi Xinhai 		/* must be pud huge, non-present or none */
68589e5fc74cSSteve Capper 		return (pte_t *)pud;
68598ac0b81aSLi Xinhai 	if (!pud_present(*pud))
68608ac0b81aSLi Xinhai 		return NULL;
68618ac0b81aSLi Xinhai 	/* must have a valid entry and size to go further */
68629b19df29SPunit Agrawal 
68639e5fc74cSSteve Capper 	pmd = pmd_offset(pud, addr);
68648ac0b81aSLi Xinhai 	/* must be pmd huge, non-present or none */
68659e5fc74cSSteve Capper 	return (pte_t *)pmd;
68669e5fc74cSSteve Capper }
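
/*
 * Illustrative note, not part of the original source: for a 1 GiB mapping
 * sz == PUD_SIZE, so the PUD slot itself is returned even when the entry
 * is none or a swap/migration entry; callers such as
 * hugetlb_change_protection() therefore still check huge_ptep_get() on the
 * result before acting on it.
 */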
68679e5fc74cSSteve Capper 
6868e95a9851SMike Kravetz /*
6869e95a9851SMike Kravetz  * Return a mask that can be used to update an address to the last huge
6870e95a9851SMike Kravetz  * page mapped by a single page table page.  Used to skip non-present
6871e95a9851SMike Kravetz  * page table entries when linearly scanning address ranges.  Architectures
6872e95a9851SMike Kravetz  * with unique huge page to page table relationships can define their own
6873e95a9851SMike Kravetz  * version of this routine.
6874e95a9851SMike Kravetz  */
6875e95a9851SMike Kravetz unsigned long hugetlb_mask_last_page(struct hstate *h)
6876e95a9851SMike Kravetz {
6877e95a9851SMike Kravetz 	unsigned long hp_size = huge_page_size(h);
6878e95a9851SMike Kravetz 
6879e95a9851SMike Kravetz 	if (hp_size == PUD_SIZE)
6880e95a9851SMike Kravetz 		return P4D_SIZE - PUD_SIZE;
6881e95a9851SMike Kravetz 	else if (hp_size == PMD_SIZE)
6882e95a9851SMike Kravetz 		return PUD_SIZE - PMD_SIZE;
6883e95a9851SMike Kravetz 	else
6884e95a9851SMike Kravetz 		return 0UL;
6885e95a9851SMike Kravetz }
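
/*
 * Illustrative worked example, not part of the original source, assuming
 * x86-64 with 2 MiB huge pages: the mask is PUD_SIZE - PMD_SIZE =
 * 0x3fe00000.  When a scan loop such as the one in
 * hugetlb_change_protection() finds no PMD table, "address |= mask" moves
 * address to the last 2 MiB slot of the current 1 GiB region, so the
 * loop's "address += psize" then lands on the next PUD boundary and the
 * remaining empty slots are never probed one by one.
 */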
6886e95a9851SMike Kravetz 
6887e95a9851SMike Kravetz #else
6888e95a9851SMike Kravetz 
6889e95a9851SMike Kravetz /* See description above.  Architectures can provide their own version. */
6890e95a9851SMike Kravetz __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
6891e95a9851SMike Kravetz {
68924ddb4d91SMike Kravetz #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
68934ddb4d91SMike Kravetz 	if (huge_page_size(h) == PMD_SIZE)
68944ddb4d91SMike Kravetz 		return PUD_SIZE - PMD_SIZE;
68954ddb4d91SMike Kravetz #endif
6896e95a9851SMike Kravetz 	return 0UL;
6897e95a9851SMike Kravetz }
6898e95a9851SMike Kravetz 
689961f77edaSNaoya Horiguchi #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
690061f77edaSNaoya Horiguchi 
690161f77edaSNaoya Horiguchi /*
690261f77edaSNaoya Horiguchi  * These functions are overwritable if your architecture needs its own
690361f77edaSNaoya Horiguchi  * These functions can be overridden if your architecture needs its own
690461f77edaSNaoya Horiguchi  */
690561f77edaSNaoya Horiguchi struct page * __weak
690661f77edaSNaoya Horiguchi follow_huge_addr(struct mm_struct *mm, unsigned long address,
690761f77edaSNaoya Horiguchi 			      int write)
690861f77edaSNaoya Horiguchi {
690961f77edaSNaoya Horiguchi 	return ERR_PTR(-EINVAL);
691061f77edaSNaoya Horiguchi }
691161f77edaSNaoya Horiguchi 
691261f77edaSNaoya Horiguchi struct page * __weak
69134dc71451SAneesh Kumar K.V follow_huge_pd(struct vm_area_struct *vma,
69144dc71451SAneesh Kumar K.V 	       unsigned long address, hugepd_t hpd, int flags, int pdshift)
69154dc71451SAneesh Kumar K.V {
69164dc71451SAneesh Kumar K.V 	WARN(1, "hugepd follow called with no support for hugepage directory format\n");
69174dc71451SAneesh Kumar K.V 	return NULL;
69184dc71451SAneesh Kumar K.V }
69194dc71451SAneesh Kumar K.V 
69204dc71451SAneesh Kumar K.V struct page * __weak
69219e5fc74cSSteve Capper follow_huge_pmd(struct mm_struct *mm, unsigned long address,
6922e66f17ffSNaoya Horiguchi 		pmd_t *pmd, int flags)
69239e5fc74cSSteve Capper {
6924e66f17ffSNaoya Horiguchi 	struct page *page = NULL;
6925e66f17ffSNaoya Horiguchi 	spinlock_t *ptl;
6926c9d398faSNaoya Horiguchi 	pte_t pte;
69273faa52c0SJohn Hubbard 
69288909691bSDavid Hildenbrand 	/*
69298909691bSDavid Hildenbrand 	 * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
69308909691bSDavid Hildenbrand 	 * follow_hugetlb_page().
69318909691bSDavid Hildenbrand 	 */
69328909691bSDavid Hildenbrand 	if (WARN_ON_ONCE(flags & FOLL_PIN))
69333faa52c0SJohn Hubbard 		return NULL;
69343faa52c0SJohn Hubbard 
6935e66f17ffSNaoya Horiguchi retry:
6936e66f17ffSNaoya Horiguchi 	ptl = pmd_lockptr(mm, pmd);
6937e66f17ffSNaoya Horiguchi 	spin_lock(ptl);
6938e66f17ffSNaoya Horiguchi 	/*
6939e66f17ffSNaoya Horiguchi 	 * make sure that the address range covered by this pmd is not
6940e66f17ffSNaoya Horiguchi 	 * unmapped by other threads.
6941e66f17ffSNaoya Horiguchi 	 */
6942e66f17ffSNaoya Horiguchi 	if (!pmd_huge(*pmd))
6943e66f17ffSNaoya Horiguchi 		goto out;
6944c9d398faSNaoya Horiguchi 	pte = huge_ptep_get((pte_t *)pmd);
6945c9d398faSNaoya Horiguchi 	if (pte_present(pte)) {
694697534127SGerald Schaefer 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
69473faa52c0SJohn Hubbard 		/*
69483faa52c0SJohn Hubbard 		 * try_grab_page() should always succeed here, because: a) we
69493faa52c0SJohn Hubbard 		 * hold the pmd (ptl) lock, and b) we've just checked that the
69503faa52c0SJohn Hubbard 		 * huge pmd (head) page is present in the page tables. The ptl
69513faa52c0SJohn Hubbard 		 * prevents the head page and tail pages from being rearranged
69523faa52c0SJohn Hubbard 		 * in any way. So this page must be available at this point,
69533faa52c0SJohn Hubbard 		 * unless the page refcount overflowed:
69543faa52c0SJohn Hubbard 		 */
69553faa52c0SJohn Hubbard 		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
69563faa52c0SJohn Hubbard 			page = NULL;
69573faa52c0SJohn Hubbard 			goto out;
69583faa52c0SJohn Hubbard 		}
6959e66f17ffSNaoya Horiguchi 	} else {
6960c9d398faSNaoya Horiguchi 		if (is_hugetlb_entry_migration(pte)) {
6961e66f17ffSNaoya Horiguchi 			spin_unlock(ptl);
6962ad1ac596SMiaohe Lin 			__migration_entry_wait_huge((pte_t *)pmd, ptl);
6963e66f17ffSNaoya Horiguchi 			goto retry;
6964e66f17ffSNaoya Horiguchi 		}
6965e66f17ffSNaoya Horiguchi 		/*
6966e66f17ffSNaoya Horiguchi 		 * hwpoisoned entry is treated as no_page_table in
6967e66f17ffSNaoya Horiguchi 		 * follow_page_mask().
6968e66f17ffSNaoya Horiguchi 		 */
6969e66f17ffSNaoya Horiguchi 	}
6970e66f17ffSNaoya Horiguchi out:
6971e66f17ffSNaoya Horiguchi 	spin_unlock(ptl);
69729e5fc74cSSteve Capper 	return page;
69739e5fc74cSSteve Capper }
69749e5fc74cSSteve Capper 
697561f77edaSNaoya Horiguchi struct page * __weak
69769e5fc74cSSteve Capper follow_huge_pud(struct mm_struct *mm, unsigned long address,
6977e66f17ffSNaoya Horiguchi 		pud_t *pud, int flags)
69789e5fc74cSSteve Capper {
69793a194f3fSNaoya Horiguchi 	struct page *page = NULL;
69803a194f3fSNaoya Horiguchi 	spinlock_t *ptl;
69813a194f3fSNaoya Horiguchi 	pte_t pte;
69823a194f3fSNaoya Horiguchi 
69833a194f3fSNaoya Horiguchi 	if (WARN_ON_ONCE(flags & FOLL_PIN))
6984e66f17ffSNaoya Horiguchi 		return NULL;
69859e5fc74cSSteve Capper 
69863a194f3fSNaoya Horiguchi retry:
69873a194f3fSNaoya Horiguchi 	ptl = huge_pte_lock(hstate_sizelog(PUD_SHIFT), mm, (pte_t *)pud);
69883a194f3fSNaoya Horiguchi 	if (!pud_huge(*pud))
69893a194f3fSNaoya Horiguchi 		goto out;
69903a194f3fSNaoya Horiguchi 	pte = huge_ptep_get((pte_t *)pud);
69913a194f3fSNaoya Horiguchi 	if (pte_present(pte)) {
69923a194f3fSNaoya Horiguchi 		page = pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
69933a194f3fSNaoya Horiguchi 		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
69943a194f3fSNaoya Horiguchi 			page = NULL;
69953a194f3fSNaoya Horiguchi 			goto out;
69963a194f3fSNaoya Horiguchi 		}
69973a194f3fSNaoya Horiguchi 	} else {
69983a194f3fSNaoya Horiguchi 		if (is_hugetlb_entry_migration(pte)) {
69993a194f3fSNaoya Horiguchi 			spin_unlock(ptl);
70003a194f3fSNaoya Horiguchi 			__migration_entry_wait(mm, (pte_t *)pud, ptl);
70013a194f3fSNaoya Horiguchi 			goto retry;
70023a194f3fSNaoya Horiguchi 		}
70033a194f3fSNaoya Horiguchi 		/*
70043a194f3fSNaoya Horiguchi 		 * hwpoisoned entry is treated as no_page_table in
70053a194f3fSNaoya Horiguchi 		 * follow_page_mask().
70063a194f3fSNaoya Horiguchi 		 */
70073a194f3fSNaoya Horiguchi 	}
70083a194f3fSNaoya Horiguchi out:
70093a194f3fSNaoya Horiguchi 	spin_unlock(ptl);
70103a194f3fSNaoya Horiguchi 	return page;
70119e5fc74cSSteve Capper }
70129e5fc74cSSteve Capper 
7013faaa5b62SAnshuman Khandual struct page * __weak
7014faaa5b62SAnshuman Khandual follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
7015faaa5b62SAnshuman Khandual {
70163faa52c0SJohn Hubbard 	if (flags & (FOLL_GET | FOLL_PIN))
7017faaa5b62SAnshuman Khandual 		return NULL;
7018faaa5b62SAnshuman Khandual 
7019faaa5b62SAnshuman Khandual 	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
7020faaa5b62SAnshuman Khandual }
7021faaa5b62SAnshuman Khandual 
70227ce82f4cSMiaohe Lin int isolate_hugetlb(struct page *page, struct list_head *list)
702331caf665SNaoya Horiguchi {
70247ce82f4cSMiaohe Lin 	int ret = 0;
7025bcc54222SNaoya Horiguchi 
7026db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
70278f251a3dSMike Kravetz 	if (!PageHeadHuge(page) ||
70288f251a3dSMike Kravetz 	    !HPageMigratable(page) ||
70290eb2df2bSMuchun Song 	    !get_page_unless_zero(page)) {
70307ce82f4cSMiaohe Lin 		ret = -EBUSY;
7031bcc54222SNaoya Horiguchi 		goto unlock;
7032bcc54222SNaoya Horiguchi 	}
70338f251a3dSMike Kravetz 	ClearHPageMigratable(page);
703431caf665SNaoya Horiguchi 	list_move_tail(&page->lru, list);
7035bcc54222SNaoya Horiguchi unlock:
7036db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
7037bcc54222SNaoya Horiguchi 	return ret;
703831caf665SNaoya Horiguchi }
703931caf665SNaoya Horiguchi 
704025182f05SNaoya Horiguchi int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
704125182f05SNaoya Horiguchi {
704225182f05SNaoya Horiguchi 	int ret = 0;
704325182f05SNaoya Horiguchi 
704425182f05SNaoya Horiguchi 	*hugetlb = false;
704525182f05SNaoya Horiguchi 	spin_lock_irq(&hugetlb_lock);
704625182f05SNaoya Horiguchi 	if (PageHeadHuge(page)) {
704725182f05SNaoya Horiguchi 		*hugetlb = true;
7048b283d983SNaoya Horiguchi 		if (HPageFreed(page))
7049b283d983SNaoya Horiguchi 			ret = 0;
7050b283d983SNaoya Horiguchi 		else if (HPageMigratable(page))
705125182f05SNaoya Horiguchi 			ret = get_page_unless_zero(page);
70520ed950d1SNaoya Horiguchi 		else
70530ed950d1SNaoya Horiguchi 			ret = -EBUSY;
705425182f05SNaoya Horiguchi 	}
705525182f05SNaoya Horiguchi 	spin_unlock_irq(&hugetlb_lock);
705625182f05SNaoya Horiguchi 	return ret;
705725182f05SNaoya Horiguchi }
705825182f05SNaoya Horiguchi 
7059405ce051SNaoya Horiguchi int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
7060405ce051SNaoya Horiguchi {
7061405ce051SNaoya Horiguchi 	int ret;
7062405ce051SNaoya Horiguchi 
7063405ce051SNaoya Horiguchi 	spin_lock_irq(&hugetlb_lock);
7064405ce051SNaoya Horiguchi 	ret = __get_huge_page_for_hwpoison(pfn, flags);
7065405ce051SNaoya Horiguchi 	spin_unlock_irq(&hugetlb_lock);
7066405ce051SNaoya Horiguchi 	return ret;
7067405ce051SNaoya Horiguchi }
7068405ce051SNaoya Horiguchi 
706931caf665SNaoya Horiguchi void putback_active_hugepage(struct page *page)
707031caf665SNaoya Horiguchi {
7071db71ef79SMike Kravetz 	spin_lock_irq(&hugetlb_lock);
70728f251a3dSMike Kravetz 	SetHPageMigratable(page);
707331caf665SNaoya Horiguchi 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
7074db71ef79SMike Kravetz 	spin_unlock_irq(&hugetlb_lock);
707531caf665SNaoya Horiguchi 	put_page(page);
707631caf665SNaoya Horiguchi }
7077ab5ac90aSMichal Hocko 
7078ab5ac90aSMichal Hocko void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
7079ab5ac90aSMichal Hocko {
7080ab5ac90aSMichal Hocko 	struct hstate *h = page_hstate(oldpage);
7081ab5ac90aSMichal Hocko 
7082ab5ac90aSMichal Hocko 	hugetlb_cgroup_migrate(oldpage, newpage);
7083ab5ac90aSMichal Hocko 	set_page_owner_migrate_reason(newpage, reason);
7084ab5ac90aSMichal Hocko 
7085ab5ac90aSMichal Hocko 	/*
7086ab5ac90aSMichal Hocko 	 * transfer the temporary state of the new huge page. This is the
7087ab5ac90aSMichal Hocko 	 * reverse of other transitions because the newpage is going to
7088ab5ac90aSMichal Hocko 	 * be final while the old one will be freed, so it takes over
7089ab5ac90aSMichal Hocko 	 * the temporary status.
7090ab5ac90aSMichal Hocko 	 *
7091ab5ac90aSMichal Hocko 	 * Also note that we have to transfer the per-node surplus state
7092ab5ac90aSMichal Hocko 	 * here as well, otherwise the global surplus count will not match
7093ab5ac90aSMichal Hocko 	 * the per-node counts.
7094ab5ac90aSMichal Hocko 	 */
70959157c311SMike Kravetz 	if (HPageTemporary(newpage)) {
7096ab5ac90aSMichal Hocko 		int old_nid = page_to_nid(oldpage);
7097ab5ac90aSMichal Hocko 		int new_nid = page_to_nid(newpage);
7098ab5ac90aSMichal Hocko 
70999157c311SMike Kravetz 		SetHPageTemporary(oldpage);
71009157c311SMike Kravetz 		ClearHPageTemporary(newpage);
7101ab5ac90aSMichal Hocko 
71025af1ab1dSMiaohe Lin 		/*
71035af1ab1dSMiaohe Lin 		 * There is no need to transfer the per-node surplus state
71045af1ab1dSMiaohe Lin 		 * when we do not cross the node.
71055af1ab1dSMiaohe Lin 		 */
71065af1ab1dSMiaohe Lin 		if (new_nid == old_nid)
71075af1ab1dSMiaohe Lin 			return;
7108db71ef79SMike Kravetz 		spin_lock_irq(&hugetlb_lock);
7109ab5ac90aSMichal Hocko 		if (h->surplus_huge_pages_node[old_nid]) {
7110ab5ac90aSMichal Hocko 			h->surplus_huge_pages_node[old_nid]--;
7111ab5ac90aSMichal Hocko 			h->surplus_huge_pages_node[new_nid]++;
7112ab5ac90aSMichal Hocko 		}
7113db71ef79SMike Kravetz 		spin_unlock_irq(&hugetlb_lock);
7114ab5ac90aSMichal Hocko 	}
7115ab5ac90aSMichal Hocko }
7116cf11e85fSRoman Gushchin 
71176dfeaff9SPeter Xu /*
71186dfeaff9SPeter Xu  * This function will unconditionally remove all the shared pmd pgtable entries
71196dfeaff9SPeter Xu  * within the specific vma for a hugetlbfs memory range.
71206dfeaff9SPeter Xu  */
71216dfeaff9SPeter Xu void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
71226dfeaff9SPeter Xu {
71236dfeaff9SPeter Xu 	struct hstate *h = hstate_vma(vma);
71246dfeaff9SPeter Xu 	unsigned long sz = huge_page_size(h);
71256dfeaff9SPeter Xu 	struct mm_struct *mm = vma->vm_mm;
71266dfeaff9SPeter Xu 	struct mmu_notifier_range range;
71276dfeaff9SPeter Xu 	unsigned long address, start, end;
71286dfeaff9SPeter Xu 	spinlock_t *ptl;
71296dfeaff9SPeter Xu 	pte_t *ptep;
71306dfeaff9SPeter Xu 
71316dfeaff9SPeter Xu 	if (!(vma->vm_flags & VM_MAYSHARE))
71326dfeaff9SPeter Xu 		return;
71336dfeaff9SPeter Xu 
71346dfeaff9SPeter Xu 	start = ALIGN(vma->vm_start, PUD_SIZE);
71356dfeaff9SPeter Xu 	end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
71366dfeaff9SPeter Xu 
71376dfeaff9SPeter Xu 	if (start >= end)
71386dfeaff9SPeter Xu 		return;
71396dfeaff9SPeter Xu 
71409c8bbfacSBaolin Wang 	flush_cache_range(vma, start, end);
71416dfeaff9SPeter Xu 	/*
71426dfeaff9SPeter Xu 	 * No need to call adjust_range_if_pmd_sharing_possible(), because
71436dfeaff9SPeter Xu 	 * we have already done the PUD_SIZE alignment.
71446dfeaff9SPeter Xu 	 */
71456dfeaff9SPeter Xu 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
71466dfeaff9SPeter Xu 				start, end);
71476dfeaff9SPeter Xu 	mmu_notifier_invalidate_range_start(&range);
71486dfeaff9SPeter Xu 	i_mmap_lock_write(vma->vm_file->f_mapping);
71496dfeaff9SPeter Xu 	for (address = start; address < end; address += PUD_SIZE) {
71506dfeaff9SPeter Xu 		ptep = huge_pte_offset(mm, address, sz);
71516dfeaff9SPeter Xu 		if (!ptep)
71526dfeaff9SPeter Xu 			continue;
71536dfeaff9SPeter Xu 		ptl = huge_pte_lock(h, mm, ptep);
71544ddb4d91SMike Kravetz 		huge_pmd_unshare(mm, vma, address, ptep);
71556dfeaff9SPeter Xu 		spin_unlock(ptl);
71566dfeaff9SPeter Xu 	}
71576dfeaff9SPeter Xu 	flush_hugetlb_tlb_range(vma, start, end);
71586dfeaff9SPeter Xu 	i_mmap_unlock_write(vma->vm_file->f_mapping);
71596dfeaff9SPeter Xu 	/*
71606dfeaff9SPeter Xu 	 * No need to call mmu_notifier_invalidate_range(), see
7161ee65728eSMike Rapoport 	 * Documentation/mm/mmu_notifier.rst.
71626dfeaff9SPeter Xu 	 */
71636dfeaff9SPeter Xu 	mmu_notifier_invalidate_range_end(&range);
71646dfeaff9SPeter Xu }
71656dfeaff9SPeter Xu 
7166cf11e85fSRoman Gushchin #ifdef CONFIG_CMA
7167cf11e85fSRoman Gushchin static bool cma_reserve_called __initdata;
7168cf11e85fSRoman Gushchin 
7169cf11e85fSRoman Gushchin static int __init cmdline_parse_hugetlb_cma(char *p)
7170cf11e85fSRoman Gushchin {
717138e719abSBaolin Wang 	int nid, count = 0;
717238e719abSBaolin Wang 	unsigned long tmp;
717338e719abSBaolin Wang 	char *s = p;
717438e719abSBaolin Wang 
717538e719abSBaolin Wang 	while (*s) {
717638e719abSBaolin Wang 		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
717738e719abSBaolin Wang 			break;
717838e719abSBaolin Wang 
717938e719abSBaolin Wang 		if (s[count] == ':') {
7180f9317f77SMike Kravetz 			if (tmp >= MAX_NUMNODES)
718138e719abSBaolin Wang 				break;
7182f9317f77SMike Kravetz 			nid = array_index_nospec(tmp, MAX_NUMNODES);
718338e719abSBaolin Wang 
718438e719abSBaolin Wang 			s += count + 1;
718538e719abSBaolin Wang 			tmp = memparse(s, &s);
718638e719abSBaolin Wang 			hugetlb_cma_size_in_node[nid] = tmp;
718738e719abSBaolin Wang 			hugetlb_cma_size += tmp;
718838e719abSBaolin Wang 
718938e719abSBaolin Wang 			/*
719038e719abSBaolin Wang 			 * Skip the separator if we have one, otherwise
719138e719abSBaolin Wang 			 * break the parsing.
719238e719abSBaolin Wang 			 */
719338e719abSBaolin Wang 			if (*s == ',')
719438e719abSBaolin Wang 				s++;
719538e719abSBaolin Wang 			else
719638e719abSBaolin Wang 				break;
719738e719abSBaolin Wang 		} else {
7198cf11e85fSRoman Gushchin 			hugetlb_cma_size = memparse(p, &p);
719938e719abSBaolin Wang 			break;
720038e719abSBaolin Wang 		}
720138e719abSBaolin Wang 	}
720238e719abSBaolin Wang 
7203cf11e85fSRoman Gushchin 	return 0;
7204cf11e85fSRoman Gushchin }
7205cf11e85fSRoman Gushchin 
7206cf11e85fSRoman Gushchin early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
7207cf11e85fSRoman Gushchin 
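/*
 * Illustrative usage, not part of the original source: with the parser
 * above, a kernel command line of "hugetlb_cma=4G" requests 4 GiB in
 * total, which hugetlb_cma_reserve() below splits roughly evenly across
 * the online nodes, while "hugetlb_cma=0:2G,1:2G" requests 2 GiB of CMA
 * on node 0 and 2 GiB on node 1.  Sizes go through memparse(), so K, M
 * and G suffixes are accepted.
 */
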
7208cf11e85fSRoman Gushchin void __init hugetlb_cma_reserve(int order)
7209cf11e85fSRoman Gushchin {
7210cf11e85fSRoman Gushchin 	unsigned long size, reserved, per_node;
721138e719abSBaolin Wang 	bool node_specific_cma_alloc = false;
7212cf11e85fSRoman Gushchin 	int nid;
7213cf11e85fSRoman Gushchin 
7214cf11e85fSRoman Gushchin 	cma_reserve_called = true;
7215cf11e85fSRoman Gushchin 
7216cf11e85fSRoman Gushchin 	if (!hugetlb_cma_size)
7217cf11e85fSRoman Gushchin 		return;
7218cf11e85fSRoman Gushchin 
721938e719abSBaolin Wang 	for (nid = 0; nid < MAX_NUMNODES; nid++) {
722038e719abSBaolin Wang 		if (hugetlb_cma_size_in_node[nid] == 0)
722138e719abSBaolin Wang 			continue;
722238e719abSBaolin Wang 
722330a51400SPeng Liu 		if (!node_online(nid)) {
722438e719abSBaolin Wang 			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
722538e719abSBaolin Wang 			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
722638e719abSBaolin Wang 			hugetlb_cma_size_in_node[nid] = 0;
722738e719abSBaolin Wang 			continue;
722838e719abSBaolin Wang 		}
722938e719abSBaolin Wang 
723038e719abSBaolin Wang 		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
723138e719abSBaolin Wang 			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
723238e719abSBaolin Wang 				nid, (PAGE_SIZE << order) / SZ_1M);
723338e719abSBaolin Wang 			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
723438e719abSBaolin Wang 			hugetlb_cma_size_in_node[nid] = 0;
723538e719abSBaolin Wang 		} else {
723638e719abSBaolin Wang 			node_specific_cma_alloc = true;
723738e719abSBaolin Wang 		}
723838e719abSBaolin Wang 	}
723938e719abSBaolin Wang 
724038e719abSBaolin Wang 	/* Validate the CMA size again in case some invalid nodes were specified. */
724138e719abSBaolin Wang 	if (!hugetlb_cma_size)
724238e719abSBaolin Wang 		return;
724338e719abSBaolin Wang 
7244cf11e85fSRoman Gushchin 	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
7245cf11e85fSRoman Gushchin 		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
7246cf11e85fSRoman Gushchin 			(PAGE_SIZE << order) / SZ_1M);
7247a01f4390SMike Kravetz 		hugetlb_cma_size = 0;
7248cf11e85fSRoman Gushchin 		return;
7249cf11e85fSRoman Gushchin 	}
7250cf11e85fSRoman Gushchin 
725138e719abSBaolin Wang 	if (!node_specific_cma_alloc) {
7252cf11e85fSRoman Gushchin 		/*
7253cf11e85fSRoman Gushchin 		 * If a 3 GB area is requested on a machine with 4 NUMA nodes,
7254cf11e85fSRoman Gushchin 		 * allocate 1 GB on the first three nodes and ignore the last one.
7255cf11e85fSRoman Gushchin 		 */
7256cf11e85fSRoman Gushchin 		per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
7257cf11e85fSRoman Gushchin 		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
7258cf11e85fSRoman Gushchin 			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
725938e719abSBaolin Wang 	}
7260cf11e85fSRoman Gushchin 
7261cf11e85fSRoman Gushchin 	reserved = 0;
726230a51400SPeng Liu 	for_each_online_node(nid) {
7263cf11e85fSRoman Gushchin 		int res;
72642281f797SBarry Song 		char name[CMA_MAX_NAME];
7265cf11e85fSRoman Gushchin 
726638e719abSBaolin Wang 		if (node_specific_cma_alloc) {
726738e719abSBaolin Wang 			if (hugetlb_cma_size_in_node[nid] == 0)
726838e719abSBaolin Wang 				continue;
726938e719abSBaolin Wang 
727038e719abSBaolin Wang 			size = hugetlb_cma_size_in_node[nid];
727138e719abSBaolin Wang 		} else {
7272cf11e85fSRoman Gushchin 			size = min(per_node, hugetlb_cma_size - reserved);
727338e719abSBaolin Wang 		}
727438e719abSBaolin Wang 
7275cf11e85fSRoman Gushchin 		size = round_up(size, PAGE_SIZE << order);
7276cf11e85fSRoman Gushchin 
72772281f797SBarry Song 		snprintf(name, sizeof(name), "hugetlb%d", nid);
7278a01f4390SMike Kravetz 		/*
7279a01f4390SMike Kravetz 		 * Note that 'order per bit' is based on the smallest size that
7280a01f4390SMike Kravetz 		 * may be returned to the CMA allocator in the case of
7281a01f4390SMike Kravetz 		 * huge page demotion.
7282a01f4390SMike Kravetz 		 */
7283a01f4390SMike Kravetz 		res = cma_declare_contiguous_nid(0, size, 0,
7284a01f4390SMike Kravetz 						PAGE_SIZE << HUGETLB_PAGE_ORDER,
728529d0f41dSBarry Song 						 0, false, name,
7286cf11e85fSRoman Gushchin 						 &hugetlb_cma[nid], nid);
7287cf11e85fSRoman Gushchin 		if (res) {
7288cf11e85fSRoman Gushchin 			pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
7289cf11e85fSRoman Gushchin 				res, nid);
7290cf11e85fSRoman Gushchin 			continue;
7291cf11e85fSRoman Gushchin 		}
7292cf11e85fSRoman Gushchin 
7293cf11e85fSRoman Gushchin 		reserved += size;
7294cf11e85fSRoman Gushchin 		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
7295cf11e85fSRoman Gushchin 			size / SZ_1M, nid);
7296cf11e85fSRoman Gushchin 
7297cf11e85fSRoman Gushchin 		if (reserved >= hugetlb_cma_size)
7298cf11e85fSRoman Gushchin 			break;
7299cf11e85fSRoman Gushchin 	}
7300a01f4390SMike Kravetz 
7301a01f4390SMike Kravetz 	if (!reserved)
7302a01f4390SMike Kravetz 		/*
7303a01f4390SMike Kravetz 		 * hugetlb_cma_size is used to determine if allocations from
7304a01f4390SMike Kravetz 		 * cma are possible.  Set to zero if no cma regions are set up.
7305a01f4390SMike Kravetz 		 */
7306a01f4390SMike Kravetz 		hugetlb_cma_size = 0;
7307cf11e85fSRoman Gushchin }
7308cf11e85fSRoman Gushchin 
7309263b8998SMiaohe Lin static void __init hugetlb_cma_check(void)
7310cf11e85fSRoman Gushchin {
7311cf11e85fSRoman Gushchin 	if (!hugetlb_cma_size || cma_reserve_called)
7312cf11e85fSRoman Gushchin 		return;
7313cf11e85fSRoman Gushchin 
7314cf11e85fSRoman Gushchin 	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
7315cf11e85fSRoman Gushchin }
7316cf11e85fSRoman Gushchin 
7317cf11e85fSRoman Gushchin #endif /* CONFIG_CMA */
7318