1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds * Generic hugetlb support.
46d49e352SNadia Yvette Chambers * (C) Nadia Yvette Chambers, April 2004
51da177e4SLinus Torvalds */
61da177e4SLinus Torvalds #include <linux/list.h>
71da177e4SLinus Torvalds #include <linux/init.h>
81da177e4SLinus Torvalds #include <linux/mm.h>
9e1759c21SAlexey Dobriyan #include <linux/seq_file.h>
101da177e4SLinus Torvalds #include <linux/sysctl.h>
111da177e4SLinus Torvalds #include <linux/highmem.h>
12cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h>
131da177e4SLinus Torvalds #include <linux/nodemask.h>
1463551ae0SDavid Gibson #include <linux/pagemap.h>
155da7ca86SChristoph Lameter #include <linux/mempolicy.h>
163b32123dSGideon Israel Dsouza #include <linux/compiler.h>
17aea47ff3SChristoph Lameter #include <linux/cpuset.h>
183935baa9SDavid Gibson #include <linux/mutex.h>
1997ad1087SMike Rapoport #include <linux/memblock.h>
20a3437870SNishanth Aravamudan #include <linux/sysfs.h>
215a0e3ad6STejun Heo #include <linux/slab.h>
22bbe88753SJoonsoo Kim #include <linux/sched/mm.h>
2363489f8eSMike Kravetz #include <linux/mmdebug.h>
24174cd4b1SIngo Molnar #include <linux/sched/signal.h>
250fe6e20bSNaoya Horiguchi #include <linux/rmap.h>
26c6247f72SMatthew Wilcox #include <linux/string_helpers.h>
27fd6a03edSNaoya Horiguchi #include <linux/swap.h>
28fd6a03edSNaoya Horiguchi #include <linux/swapops.h>
298382d914SDavidlohr Bueso #include <linux/jhash.h>
3098fa15f3SAnshuman Khandual #include <linux/numa.h>
31c77c0a8aSWaiman Long #include <linux/llist.h>
32cf11e85fSRoman Gushchin #include <linux/cma.h>
338cc5fcbbSMina Almasry #include <linux/migrate.h>
34f9317f77SMike Kravetz #include <linux/nospec.h>
35662ce1dcSYang Yang #include <linux/delayacct.h>
36b958d4d0SMuchun Song #include <linux/memory.h>
37af19487fSAxel Rasmussen #include <linux/mm_inline.h>
38d6606683SLinus Torvalds
3963551ae0SDavid Gibson #include <asm/page.h>
40ca15ca40SMike Rapoport #include <asm/pgalloc.h>
4124669e58SAneesh Kumar K.V #include <asm/tlb.h>
4263551ae0SDavid Gibson
4324669e58SAneesh Kumar K.V #include <linux/io.h>
4463551ae0SDavid Gibson #include <linux/hugetlb.h>
459dd540e2SAneesh Kumar K.V #include <linux/hugetlb_cgroup.h>
469a305230SLee Schermerhorn #include <linux/node.h>
47ab5ac90aSMichal Hocko #include <linux/page_owner.h>
487835e98bSNick Piggin #include "internal.h"
49f41f2ed4SMuchun Song #include "hugetlb_vmemmap.h"
501da177e4SLinus Torvalds
51c3f38a38SAneesh Kumar K.V int hugetlb_max_hstate __read_mostly;
52e5ff2159SAndi Kleen unsigned int default_hstate_idx;
53e5ff2159SAndi Kleen struct hstate hstates[HUGE_MAX_HSTATE];
54cf11e85fSRoman Gushchin
55dbda8feaSBarry Song #ifdef CONFIG_CMA
56cf11e85fSRoman Gushchin static struct cma *hugetlb_cma[MAX_NUMNODES];
5738e719abSBaolin Wang static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
582f6c57d6SSidhartha Kumar static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
59a01f4390SMike Kravetz {
602f6c57d6SSidhartha Kumar return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
61a01f4390SMike Kravetz 1 << order);
62a01f4390SMike Kravetz }
63a01f4390SMike Kravetz #else
642f6c57d6SSidhartha Kumar static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
65a01f4390SMike Kravetz {
66a01f4390SMike Kravetz return false;
67a01f4390SMike Kravetz }
68dbda8feaSBarry Song #endif
69dbda8feaSBarry Song static unsigned long hugetlb_cma_size __initdata;
70cf11e85fSRoman Gushchin
7153ba51d2SJon Tollefson __initdata LIST_HEAD(huge_boot_pages);
7253ba51d2SJon Tollefson
73e5ff2159SAndi Kleen /* for command line parsing */
74e5ff2159SAndi Kleen static struct hstate * __initdata parsed_hstate;
75e5ff2159SAndi Kleen static unsigned long __initdata default_hstate_max_huge_pages;
769fee021dSVaishali Thakkar static bool __initdata parsed_valid_hugepagesz = true;
77282f4214SMike Kravetz static bool __initdata parsed_default_hugepagesz;
78b5389086SZhenguo Yao static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
79e5ff2159SAndi Kleen
803935baa9SDavid Gibson /*
8131caf665SNaoya Horiguchi * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
8231caf665SNaoya Horiguchi * free_huge_pages, and surplus_huge_pages.
833935baa9SDavid Gibson */
84c3f38a38SAneesh Kumar K.V DEFINE_SPINLOCK(hugetlb_lock);
850bd0f9fbSEric Paris
868382d914SDavidlohr Bueso /*
878382d914SDavidlohr Bueso * Serializes faults on the same logical page. This is used to
888382d914SDavidlohr Bueso * prevent spurious OOMs when the hugepage pool is fully utilized.
898382d914SDavidlohr Bueso */
908382d914SDavidlohr Bueso static int num_fault_mutexes;
91c672c7f2SMike Kravetz struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
928382d914SDavidlohr Bueso
937ca02d0aSMike Kravetz /* Forward declaration */
947ca02d0aSMike Kravetz static int hugetlb_acct_memory(struct hstate *h, long delta);
958d9bfb26SMike Kravetz static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
968d9bfb26SMike Kravetz static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
97ecfbd733SMike Kravetz static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
98b30c14cdSJames Houghton static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
99b30c14cdSJames Houghton unsigned long start, unsigned long end);
100bf491692SRik van Riel static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
1017ca02d0aSMike Kravetz
1021d88433bSMiaohe Lin static inline bool subpool_is_free(struct hugepage_subpool *spool)
1031d88433bSMiaohe Lin {
1041d88433bSMiaohe Lin if (spool->count)
1051d88433bSMiaohe Lin return false;
1061d88433bSMiaohe Lin if (spool->max_hpages != -1)
1071d88433bSMiaohe Lin return spool->used_hpages == 0;
1081d88433bSMiaohe Lin if (spool->min_hpages != -1)
1091d88433bSMiaohe Lin return spool->rsv_hpages == spool->min_hpages;
1101d88433bSMiaohe Lin
1111d88433bSMiaohe Lin return true;
1121d88433bSMiaohe Lin }
1131d88433bSMiaohe Lin
114db71ef79SMike Kravetz static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
115db71ef79SMike Kravetz unsigned long irq_flags)
11690481622SDavid Gibson {
117db71ef79SMike Kravetz spin_unlock_irqrestore(&spool->lock, irq_flags);
11890481622SDavid Gibson
11990481622SDavid Gibson /* If no pages are used, and no other handles to the subpool
1207c8de358SEthon Paul * remain, give up any reservations based on minimum size and
1217ca02d0aSMike Kravetz * free the subpool */
1221d88433bSMiaohe Lin if (subpool_is_free(spool)) {
1237ca02d0aSMike Kravetz if (spool->min_hpages != -1)
1247ca02d0aSMike Kravetz hugetlb_acct_memory(spool->hstate,
1257ca02d0aSMike Kravetz -spool->min_hpages);
12690481622SDavid Gibson kfree(spool);
12790481622SDavid Gibson }
1287ca02d0aSMike Kravetz }
12990481622SDavid Gibson
1307ca02d0aSMike Kravetz struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
1317ca02d0aSMike Kravetz long min_hpages)
13290481622SDavid Gibson {
13390481622SDavid Gibson struct hugepage_subpool *spool;
13490481622SDavid Gibson
135c6a91820SMike Kravetz spool = kzalloc(sizeof(*spool), GFP_KERNEL);
13690481622SDavid Gibson if (!spool)
13790481622SDavid Gibson return NULL;
13890481622SDavid Gibson
13990481622SDavid Gibson spin_lock_init(&spool->lock);
14090481622SDavid Gibson spool->count = 1;
1417ca02d0aSMike Kravetz spool->max_hpages = max_hpages;
1427ca02d0aSMike Kravetz spool->hstate = h;
1437ca02d0aSMike Kravetz spool->min_hpages = min_hpages;
1447ca02d0aSMike Kravetz
1457ca02d0aSMike Kravetz if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
1467ca02d0aSMike Kravetz kfree(spool);
1477ca02d0aSMike Kravetz return NULL;
1487ca02d0aSMike Kravetz }
1497ca02d0aSMike Kravetz spool->rsv_hpages = min_hpages;
15090481622SDavid Gibson
15190481622SDavid Gibson return spool;
15290481622SDavid Gibson }
15390481622SDavid Gibson
15490481622SDavid Gibson void hugepage_put_subpool(struct hugepage_subpool *spool)
15590481622SDavid Gibson {
156db71ef79SMike Kravetz unsigned long flags;
157db71ef79SMike Kravetz
158db71ef79SMike Kravetz spin_lock_irqsave(&spool->lock, flags);
15990481622SDavid Gibson BUG_ON(!spool->count);
16090481622SDavid Gibson spool->count--;
161db71ef79SMike Kravetz unlock_or_release_subpool(spool, flags);
16290481622SDavid Gibson }
16390481622SDavid Gibson
1641c5ecae3SMike Kravetz /*
1651c5ecae3SMike Kravetz * Subpool accounting for allocating and reserving pages.
1661c5ecae3SMike Kravetz * Return -ENOMEM if there are not enough resources to satisfy the
1679e7ee400SRandy Dunlap * request. Otherwise, return the number of pages by which the
1681c5ecae3SMike Kravetz * global pools must be adjusted (upward). The returned value may
1691c5ecae3SMike Kravetz * only be different than the passed value (delta) in the case where
1707c8de358SEthon Paul * a subpool minimum size must be maintained.
1711c5ecae3SMike Kravetz */
1721c5ecae3SMike Kravetz static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
17390481622SDavid Gibson long delta)
17490481622SDavid Gibson {
1751c5ecae3SMike Kravetz long ret = delta;
17690481622SDavid Gibson
17790481622SDavid Gibson if (!spool)
1781c5ecae3SMike Kravetz return ret;
17990481622SDavid Gibson
180db71ef79SMike Kravetz spin_lock_irq(&spool->lock);
18190481622SDavid Gibson
1821c5ecae3SMike Kravetz if (spool->max_hpages != -1) { /* maximum size accounting */
1831c5ecae3SMike Kravetz if ((spool->used_hpages + delta) <= spool->max_hpages)
1841c5ecae3SMike Kravetz spool->used_hpages += delta;
1851c5ecae3SMike Kravetz else {
1861c5ecae3SMike Kravetz ret = -ENOMEM;
1871c5ecae3SMike Kravetz goto unlock_ret;
1881c5ecae3SMike Kravetz }
1891c5ecae3SMike Kravetz }
1901c5ecae3SMike Kravetz
19109a95e29SMike Kravetz /* minimum size accounting */
19209a95e29SMike Kravetz if (spool->min_hpages != -1 && spool->rsv_hpages) {
1931c5ecae3SMike Kravetz if (delta > spool->rsv_hpages) {
1941c5ecae3SMike Kravetz /*
1951c5ecae3SMike Kravetz * Asking for more reserves than those already taken on
1961c5ecae3SMike Kravetz * behalf of subpool. Return difference.
1971c5ecae3SMike Kravetz */
1981c5ecae3SMike Kravetz ret = delta - spool->rsv_hpages;
1991c5ecae3SMike Kravetz spool->rsv_hpages = 0;
2001c5ecae3SMike Kravetz } else {
2011c5ecae3SMike Kravetz ret = 0; /* reserves already accounted for */
2021c5ecae3SMike Kravetz spool->rsv_hpages -= delta;
2031c5ecae3SMike Kravetz }
2041c5ecae3SMike Kravetz }
2051c5ecae3SMike Kravetz
2061c5ecae3SMike Kravetz unlock_ret:
207db71ef79SMike Kravetz spin_unlock_irq(&spool->lock);
20890481622SDavid Gibson return ret;
20990481622SDavid Gibson }
21090481622SDavid Gibson
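/*
 * Standalone user-space sketch (not part of the kernel source) of the
 * minimum-size accounting performed by hugepage_subpool_get_pages()
 * above: pages already held in rsv_hpages for the subpool's minimum do
 * not need to be taken from the global pool again, so the returned
 * adjustment is only the part of 'delta' the reserve cannot cover.  The
 * demo_* names are illustrative, not kernel symbols.
 */
#include <assert.h>

struct demo_subpool {
	long rsv_hpages;	/* pages still reserved for the subpool minimum */
};

static long demo_subpool_get_pages(struct demo_subpool *spool, long delta)
{
	long ret = delta;

	if (delta > spool->rsv_hpages) {
		ret = delta - spool->rsv_hpages;	/* only the excess hits the global pool */
		spool->rsv_hpages = 0;
	} else {
		ret = 0;				/* fully covered by the reserve */
		spool->rsv_hpages -= delta;
	}
	return ret;
}

int main(void)
{
	struct demo_subpool sp = { .rsv_hpages = 4 };	/* e.g. mounted with min_size = 4 pages */

	assert(demo_subpool_get_pages(&sp, 6) == 2);	/* 4 covered by the reserve, 2 charged globally */
	assert(demo_subpool_get_pages(&sp, 2) == 2);	/* reserve exhausted, full charge */
	return 0;
}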
2111c5ecae3SMike Kravetz /*
2121c5ecae3SMike Kravetz * Subpool accounting for freeing and unreserving pages.
2131c5ecae3SMike Kravetz * Return the number of global page reservations that must be dropped.
2141c5ecae3SMike Kravetz * The return value may only be different than the passed value (delta)
2151c5ecae3SMike Kravetz * in the case where a subpool minimum size must be maintained.
2161c5ecae3SMike Kravetz */
2171c5ecae3SMike Kravetz static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
21890481622SDavid Gibson long delta)
21990481622SDavid Gibson {
2201c5ecae3SMike Kravetz long ret = delta;
221db71ef79SMike Kravetz unsigned long flags;
2221c5ecae3SMike Kravetz
22390481622SDavid Gibson if (!spool)
2241c5ecae3SMike Kravetz return delta;
22590481622SDavid Gibson
226db71ef79SMike Kravetz spin_lock_irqsave(&spool->lock, flags);
2271c5ecae3SMike Kravetz
2281c5ecae3SMike Kravetz if (spool->max_hpages != -1) /* maximum size accounting */
22990481622SDavid Gibson spool->used_hpages -= delta;
2301c5ecae3SMike Kravetz
23109a95e29SMike Kravetz /* minimum size accounting */
23209a95e29SMike Kravetz if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
2331c5ecae3SMike Kravetz if (spool->rsv_hpages + delta <= spool->min_hpages)
2341c5ecae3SMike Kravetz ret = 0;
2351c5ecae3SMike Kravetz else
2361c5ecae3SMike Kravetz ret = spool->rsv_hpages + delta - spool->min_hpages;
2371c5ecae3SMike Kravetz
2381c5ecae3SMike Kravetz spool->rsv_hpages += delta;
2391c5ecae3SMike Kravetz if (spool->rsv_hpages > spool->min_hpages)
2401c5ecae3SMike Kravetz spool->rsv_hpages = spool->min_hpages;
2411c5ecae3SMike Kravetz }
2421c5ecae3SMike Kravetz
2431c5ecae3SMike Kravetz /*
2441c5ecae3SMike Kravetz * If hugetlbfs_put_super couldn't free spool due to an outstanding
2451c5ecae3SMike Kravetz * quota reference, free it now.
2461c5ecae3SMike Kravetz */
247db71ef79SMike Kravetz unlock_or_release_subpool(spool, flags);
2481c5ecae3SMike Kravetz
2491c5ecae3SMike Kravetz return ret;
25090481622SDavid Gibson }
25190481622SDavid Gibson
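/*
 * Standalone user-space sketch (not kernel code) of the mirror rule in
 * hugepage_subpool_put_pages() above: while the subpool is below its
 * minimum size, freed pages first refill rsv_hpages and only the excess
 * is reported back as global reservations to drop.  demo_* names are
 * illustrative only.
 */
#include <assert.h>

static long demo_subpool_put_pages(long *rsv_hpages, long min_hpages, long delta)
{
	long ret;

	if (*rsv_hpages + delta <= min_hpages)
		ret = 0;				/* everything refills the subpool reserve */
	else
		ret = *rsv_hpages + delta - min_hpages;	/* excess goes back to the global pool */

	*rsv_hpages += delta;
	if (*rsv_hpages > min_hpages)
		*rsv_hpages = min_hpages;
	return ret;
}

int main(void)
{
	long rsv = 1;

	/* min_size = 4, one page still reserved: of 5 freed pages, 3 refill, 2 are released. */
	assert(demo_subpool_put_pages(&rsv, 4, 5) == 2);
	assert(rsv == 4);
	return 0;
}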
25290481622SDavid Gibson static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
25390481622SDavid Gibson {
25490481622SDavid Gibson return HUGETLBFS_SB(inode->i_sb)->spool;
25590481622SDavid Gibson }
25690481622SDavid Gibson
25790481622SDavid Gibson static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
25890481622SDavid Gibson {
259496ad9aaSAl Viro return subpool_inode(file_inode(vma->vm_file));
26090481622SDavid Gibson }
26190481622SDavid Gibson
262e700898fSMike Kravetz /*
263e700898fSMike Kravetz * hugetlb vma_lock helper routines
264e700898fSMike Kravetz */
265e700898fSMike Kravetz void hugetlb_vma_lock_read(struct vm_area_struct *vma)
266e700898fSMike Kravetz {
267e700898fSMike Kravetz if (__vma_shareable_lock(vma)) {
268e700898fSMike Kravetz struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
269e700898fSMike Kravetz
270e700898fSMike Kravetz down_read(&vma_lock->rw_sema);
271bf491692SRik van Riel } else if (__vma_private_lock(vma)) {
272bf491692SRik van Riel struct resv_map *resv_map = vma_resv_map(vma);
273bf491692SRik van Riel
274bf491692SRik van Riel down_read(&resv_map->rw_sema);
275e700898fSMike Kravetz }
276e700898fSMike Kravetz }
277e700898fSMike Kravetz
278e700898fSMike Kravetz void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
279e700898fSMike Kravetz {
280e700898fSMike Kravetz if (__vma_shareable_lock(vma)) {
281e700898fSMike Kravetz struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
282e700898fSMike Kravetz
283e700898fSMike Kravetz up_read(&vma_lock->rw_sema);
284bf491692SRik van Riel } else if (__vma_private_lock(vma)) {
285bf491692SRik van Riel struct resv_map *resv_map = vma_resv_map(vma);
286bf491692SRik van Riel
287bf491692SRik van Riel up_read(&resv_map->rw_sema);
288e700898fSMike Kravetz }
289e700898fSMike Kravetz }
290e700898fSMike Kravetz
291e700898fSMike Kravetz void hugetlb_vma_lock_write(struct vm_area_struct *vma)
292e700898fSMike Kravetz {
293e700898fSMike Kravetz if (__vma_shareable_lock(vma)) {
294e700898fSMike Kravetz struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
295e700898fSMike Kravetz
296e700898fSMike Kravetz down_write(&vma_lock->rw_sema);
297bf491692SRik van Riel } else if (__vma_private_lock(vma)) {
298bf491692SRik van Riel struct resv_map *resv_map = vma_resv_map(vma);
299bf491692SRik van Riel
300bf491692SRik van Riel down_write(&resv_map->rw_sema);
301e700898fSMike Kravetz }
302e700898fSMike Kravetz }
303e700898fSMike Kravetz
304e700898fSMike Kravetz void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
305e700898fSMike Kravetz {
306e700898fSMike Kravetz if (__vma_shareable_lock(vma)) {
307e700898fSMike Kravetz struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
308e700898fSMike Kravetz
309e700898fSMike Kravetz up_write(&vma_lock->rw_sema);
310bf491692SRik van Riel } else if (__vma_private_lock(vma)) {
311bf491692SRik van Riel struct resv_map *resv_map = vma_resv_map(vma);
312bf491692SRik van Riel
313bf491692SRik van Riel up_write(&resv_map->rw_sema);
314e700898fSMike Kravetz }
315e700898fSMike Kravetz }
316e700898fSMike Kravetz
317e700898fSMike Kravetz int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
318e700898fSMike Kravetz {
319bf491692SRik van Riel
320bf491692SRik van Riel if (__vma_shareable_lock(vma)) {
321e700898fSMike Kravetz struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
322e700898fSMike Kravetz
323e700898fSMike Kravetz return down_write_trylock(&vma_lock->rw_sema);
324bf491692SRik van Riel } else if (__vma_private_lock(vma)) {
325bf491692SRik van Riel struct resv_map *resv_map = vma_resv_map(vma);
326bf491692SRik van Riel
327bf491692SRik van Riel return down_write_trylock(&resv_map->rw_sema);
328bf491692SRik van Riel }
329bf491692SRik van Riel
330bf491692SRik van Riel return 1;
331e700898fSMike Kravetz }
332e700898fSMike Kravetz
333e700898fSMike Kravetz void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
334e700898fSMike Kravetz {
335e700898fSMike Kravetz if (__vma_shareable_lock(vma)) {
336e700898fSMike Kravetz struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
337e700898fSMike Kravetz
338e700898fSMike Kravetz lockdep_assert_held(&vma_lock->rw_sema);
339bf491692SRik van Riel } else if (__vma_private_lock(vma)) {
340bf491692SRik van Riel struct resv_map *resv_map = vma_resv_map(vma);
341bf491692SRik van Riel
342bf491692SRik van Riel lockdep_assert_held(&resv_map->rw_sema);
343e700898fSMike Kravetz }
344e700898fSMike Kravetz }
345e700898fSMike Kravetz
346e700898fSMike Kravetz void hugetlb_vma_lock_release(struct kref *kref)
347e700898fSMike Kravetz {
348e700898fSMike Kravetz struct hugetlb_vma_lock *vma_lock = container_of(kref,
349e700898fSMike Kravetz struct hugetlb_vma_lock, refs);
350e700898fSMike Kravetz
351e700898fSMike Kravetz kfree(vma_lock);
352e700898fSMike Kravetz }
353e700898fSMike Kravetz
354e700898fSMike Kravetz static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
355e700898fSMike Kravetz {
356e700898fSMike Kravetz struct vm_area_struct *vma = vma_lock->vma;
357e700898fSMike Kravetz
358e700898fSMike Kravetz /*
359e700898fSMike Kravetz * vma_lock structure may or may not be released as a result of put,
360e700898fSMike Kravetz * it certainly will no longer be attached to vma so clear pointer.
361e700898fSMike Kravetz * Semaphore synchronizes access to vma_lock->vma field.
362e700898fSMike Kravetz */
363e700898fSMike Kravetz vma_lock->vma = NULL;
364e700898fSMike Kravetz vma->vm_private_data = NULL;
365e700898fSMike Kravetz up_write(&vma_lock->rw_sema);
366e700898fSMike Kravetz kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
367e700898fSMike Kravetz }
368e700898fSMike Kravetz
369e700898fSMike Kravetz static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
370e700898fSMike Kravetz {
371e700898fSMike Kravetz if (__vma_shareable_lock(vma)) {
372e700898fSMike Kravetz struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
373e700898fSMike Kravetz
374e700898fSMike Kravetz __hugetlb_vma_unlock_write_put(vma_lock);
375bf491692SRik van Riel } else if (__vma_private_lock(vma)) {
376bf491692SRik van Riel struct resv_map *resv_map = vma_resv_map(vma);
377bf491692SRik van Riel
378bf491692SRik van Riel /* no free for anon vmas, but still need to unlock */
379bf491692SRik van Riel up_write(&resv_map->rw_sema);
380e700898fSMike Kravetz }
381e700898fSMike Kravetz }
382e700898fSMike Kravetz
383e700898fSMike Kravetz static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
384e700898fSMike Kravetz {
385e700898fSMike Kravetz /*
386e700898fSMike Kravetz * Only present in sharable vmas.
387e700898fSMike Kravetz */
388e700898fSMike Kravetz if (!vma || !__vma_shareable_lock(vma))
389e700898fSMike Kravetz return;
390e700898fSMike Kravetz
391e700898fSMike Kravetz if (vma->vm_private_data) {
392e700898fSMike Kravetz struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
393e700898fSMike Kravetz
394e700898fSMike Kravetz down_write(&vma_lock->rw_sema);
395e700898fSMike Kravetz __hugetlb_vma_unlock_write_put(vma_lock);
396e700898fSMike Kravetz }
397e700898fSMike Kravetz }
398e700898fSMike Kravetz
399e700898fSMike Kravetz static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
400e700898fSMike Kravetz {
401e700898fSMike Kravetz struct hugetlb_vma_lock *vma_lock;
402e700898fSMike Kravetz
403e700898fSMike Kravetz /* Only establish in (flags) sharable vmas */
404e700898fSMike Kravetz if (!vma || !(vma->vm_flags & VM_MAYSHARE))
405e700898fSMike Kravetz return;
406e700898fSMike Kravetz
407e700898fSMike Kravetz /* Should never get here with non-NULL vm_private_data */
408e700898fSMike Kravetz if (vma->vm_private_data)
409e700898fSMike Kravetz return;
410e700898fSMike Kravetz
411e700898fSMike Kravetz vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
412e700898fSMike Kravetz if (!vma_lock) {
413e700898fSMike Kravetz /*
414e700898fSMike Kravetz * If we can not allocate the structure, then the vma can not
415e700898fSMike Kravetz * participate in pmd sharing. This only forgoes a possible
416e700898fSMike Kravetz * performance enhancement and memory saving.
417e700898fSMike Kravetz * However, the lock is also used to synchronize page
418e700898fSMike Kravetz * faults with truncation. If the lock is not present,
419e700898fSMike Kravetz * unlikely races could leave pages in a file past i_size
420e700898fSMike Kravetz * until the file is removed. Warn in the unlikely case of
421e700898fSMike Kravetz * allocation failure.
422e700898fSMike Kravetz */
423e700898fSMike Kravetz pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
424e700898fSMike Kravetz return;
425e700898fSMike Kravetz }
426e700898fSMike Kravetz
427e700898fSMike Kravetz kref_init(&vma_lock->refs);
428e700898fSMike Kravetz init_rwsem(&vma_lock->rw_sema);
429e700898fSMike Kravetz vma_lock->vma = vma;
430e700898fSMike Kravetz vma->vm_private_data = vma_lock;
431e700898fSMike Kravetz }
432e700898fSMike Kravetz
4330db9d74eSMina Almasry /* Helper that removes a struct file_region from the resv_map cache and returns
4340db9d74eSMina Almasry * it for use.
4350db9d74eSMina Almasry */
4360db9d74eSMina Almasry static struct file_region *
4370db9d74eSMina Almasry get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
4380db9d74eSMina Almasry {
4393259914fSXU pengfei struct file_region *nrg;
4400db9d74eSMina Almasry
4410db9d74eSMina Almasry VM_BUG_ON(resv->region_cache_count <= 0);
4420db9d74eSMina Almasry
4430db9d74eSMina Almasry resv->region_cache_count--;
4440db9d74eSMina Almasry nrg = list_first_entry(&resv->region_cache, struct file_region, link);
4450db9d74eSMina Almasry list_del(&nrg->link);
4460db9d74eSMina Almasry
4470db9d74eSMina Almasry nrg->from = from;
4480db9d74eSMina Almasry nrg->to = to;
4490db9d74eSMina Almasry
4500db9d74eSMina Almasry return nrg;
4510db9d74eSMina Almasry }
4520db9d74eSMina Almasry
453075a61d0SMina Almasry static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
454075a61d0SMina Almasry struct file_region *rg)
455075a61d0SMina Almasry {
456075a61d0SMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
457075a61d0SMina Almasry nrg->reservation_counter = rg->reservation_counter;
458075a61d0SMina Almasry nrg->css = rg->css;
459075a61d0SMina Almasry if (rg->css)
460075a61d0SMina Almasry css_get(rg->css);
461075a61d0SMina Almasry #endif
462075a61d0SMina Almasry }
463075a61d0SMina Almasry
464075a61d0SMina Almasry /* Helper that records hugetlb_cgroup uncharge info. */
465075a61d0SMina Almasry static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
466075a61d0SMina Almasry struct hstate *h,
467075a61d0SMina Almasry struct resv_map *resv,
468075a61d0SMina Almasry struct file_region *nrg)
469075a61d0SMina Almasry {
470075a61d0SMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
471075a61d0SMina Almasry if (h_cg) {
472075a61d0SMina Almasry nrg->reservation_counter =
473075a61d0SMina Almasry &h_cg->rsvd_hugepage[hstate_index(h)];
474075a61d0SMina Almasry nrg->css = &h_cg->css;
475d85aecf2SMiaohe Lin /*
476d85aecf2SMiaohe Lin * The caller will hold exactly one h_cg->css reference for the
477d85aecf2SMiaohe Lin * whole contiguous reservation region. But this area might be
478d85aecf2SMiaohe Lin * scattered when there are already some file_regions reside in
479d85aecf2SMiaohe Lin * it. As a result, many file_regions may share only one css
480d85aecf2SMiaohe Lin * reference. In order to ensure that one file_region must hold
481d85aecf2SMiaohe Lin * exactly one h_cg->css reference, we should do css_get for
482d85aecf2SMiaohe Lin * each file_region and leave the reference held by caller
483d85aecf2SMiaohe Lin * untouched.
484d85aecf2SMiaohe Lin */
485d85aecf2SMiaohe Lin css_get(&h_cg->css);
486075a61d0SMina Almasry if (!resv->pages_per_hpage)
487075a61d0SMina Almasry resv->pages_per_hpage = pages_per_huge_page(h);
488075a61d0SMina Almasry /* pages_per_hpage should be the same for all entries in
489075a61d0SMina Almasry * a resv_map.
490075a61d0SMina Almasry */
491075a61d0SMina Almasry VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
492075a61d0SMina Almasry } else {
493075a61d0SMina Almasry nrg->reservation_counter = NULL;
494075a61d0SMina Almasry nrg->css = NULL;
495075a61d0SMina Almasry }
496075a61d0SMina Almasry #endif
497075a61d0SMina Almasry }
498075a61d0SMina Almasry
499d85aecf2SMiaohe Lin static void put_uncharge_info(struct file_region *rg)
500d85aecf2SMiaohe Lin {
501d85aecf2SMiaohe Lin #ifdef CONFIG_CGROUP_HUGETLB
502d85aecf2SMiaohe Lin if (rg->css)
503d85aecf2SMiaohe Lin css_put(rg->css);
504d85aecf2SMiaohe Lin #endif
505d85aecf2SMiaohe Lin }
506d85aecf2SMiaohe Lin
507a9b3f867SMina Almasry static bool has_same_uncharge_info(struct file_region *rg,
508a9b3f867SMina Almasry struct file_region *org)
509a9b3f867SMina Almasry {
510a9b3f867SMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
5110739eb43SBaolin Wang return rg->reservation_counter == org->reservation_counter &&
512a9b3f867SMina Almasry rg->css == org->css;
513a9b3f867SMina Almasry
514a9b3f867SMina Almasry #else
515a9b3f867SMina Almasry return true;
516a9b3f867SMina Almasry #endif
517a9b3f867SMina Almasry }
518a9b3f867SMina Almasry
519a9b3f867SMina Almasry static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
520a9b3f867SMina Almasry {
5213259914fSXU pengfei struct file_region *nrg, *prg;
522a9b3f867SMina Almasry
523a9b3f867SMina Almasry prg = list_prev_entry(rg, link);
524a9b3f867SMina Almasry if (&prg->link != &resv->regions && prg->to == rg->from &&
525a9b3f867SMina Almasry has_same_uncharge_info(prg, rg)) {
526a9b3f867SMina Almasry prg->to = rg->to;
527a9b3f867SMina Almasry
528a9b3f867SMina Almasry list_del(&rg->link);
529d85aecf2SMiaohe Lin put_uncharge_info(rg);
530a9b3f867SMina Almasry kfree(rg);
531a9b3f867SMina Almasry
5327db5e7b6SWei Yang rg = prg;
533a9b3f867SMina Almasry }
534a9b3f867SMina Almasry
535a9b3f867SMina Almasry nrg = list_next_entry(rg, link);
536a9b3f867SMina Almasry if (&nrg->link != &resv->regions && nrg->from == rg->to &&
537a9b3f867SMina Almasry has_same_uncharge_info(nrg, rg)) {
538a9b3f867SMina Almasry nrg->from = rg->from;
539a9b3f867SMina Almasry
540a9b3f867SMina Almasry list_del(&rg->link);
541d85aecf2SMiaohe Lin put_uncharge_info(rg);
542a9b3f867SMina Almasry kfree(rg);
543a9b3f867SMina Almasry }
544a9b3f867SMina Almasry }
545a9b3f867SMina Almasry
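/*
 * Standalone user-space sketch (illustrative demo_* names) of the merge
 * rule used by coalesce_file_region() above: two [from, to) entries are
 * collapsed only when they touch end-to-start and carry the same cgroup
 * uncharge info, so per-cgroup accounting is never mixed.
 */
#include <assert.h>

struct demo_region { long from, to; int css_id; };

static int demo_try_coalesce(struct demo_region *prev, struct demo_region *cur)
{
	if (prev->to == cur->from && prev->css_id == cur->css_id) {
		prev->to = cur->to;	/* absorb 'cur' into 'prev' */
		return 1;
	}
	return 0;			/* keep the entries separate */
}

int main(void)
{
	struct demo_region a = { 0, 2, 1 }, b = { 2, 5, 1 }, c = { 5, 7, 2 };

	assert(demo_try_coalesce(&a, &b) && a.to == 5);	/* [0,2) + [2,5) -> [0,5) */
	assert(!demo_try_coalesce(&a, &c));		/* adjacent but different css: not merged */
	return 0;
}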
5462103cf9cSPeter Xu static inline long
54784448c8eSJakob Koschel hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
5482103cf9cSPeter Xu long to, struct hstate *h, struct hugetlb_cgroup *cg,
5492103cf9cSPeter Xu long *regions_needed)
5502103cf9cSPeter Xu {
5512103cf9cSPeter Xu struct file_region *nrg;
5522103cf9cSPeter Xu
5532103cf9cSPeter Xu if (!regions_needed) {
5542103cf9cSPeter Xu nrg = get_file_region_entry_from_cache(map, from, to);
5552103cf9cSPeter Xu record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
55684448c8eSJakob Koschel list_add(&nrg->link, rg);
5572103cf9cSPeter Xu coalesce_file_region(map, nrg);
5582103cf9cSPeter Xu } else
5592103cf9cSPeter Xu *regions_needed += 1;
5602103cf9cSPeter Xu
5612103cf9cSPeter Xu return to - from;
5622103cf9cSPeter Xu }
5632103cf9cSPeter Xu
564972a3da3SWei Yang /*
565972a3da3SWei Yang * Must be called with resv->lock held.
566972a3da3SWei Yang *
567972a3da3SWei Yang * Calling this with regions_needed != NULL will count the number of pages
568972a3da3SWei Yang * to be added but will not modify the linked list. And regions_needed will
569972a3da3SWei Yang * indicate the number of file_regions needed in the cache to carry out to add
570972a3da3SWei Yang * the regions for this range.
571d75c6af9SMina Almasry */
572d75c6af9SMina Almasry static long add_reservation_in_range(struct resv_map *resv, long f, long t,
573075a61d0SMina Almasry struct hugetlb_cgroup *h_cg,
574972a3da3SWei Yang struct hstate *h, long *regions_needed)
575d75c6af9SMina Almasry {
5760db9d74eSMina Almasry long add = 0;
577d75c6af9SMina Almasry struct list_head *head = &resv->regions;
5780db9d74eSMina Almasry long last_accounted_offset = f;
57984448c8eSJakob Koschel struct file_region *iter, *trg = NULL;
58084448c8eSJakob Koschel struct list_head *rg = NULL;
581d75c6af9SMina Almasry
5820db9d74eSMina Almasry if (regions_needed)
5830db9d74eSMina Almasry *regions_needed = 0;
584d75c6af9SMina Almasry
5850db9d74eSMina Almasry /* In this loop, we essentially handle an entry for the range
58684448c8eSJakob Koschel * [last_accounted_offset, iter->from), at every iteration, with some
5870db9d74eSMina Almasry * bounds checking.
5880db9d74eSMina Almasry */
58984448c8eSJakob Koschel list_for_each_entry_safe(iter, trg, head, link) {
5900db9d74eSMina Almasry /* Skip irrelevant regions that start before our range. */
59184448c8eSJakob Koschel if (iter->from < f) {
5920db9d74eSMina Almasry /* If this region ends after the last accounted offset,
5930db9d74eSMina Almasry * then we need to update last_accounted_offset.
5940db9d74eSMina Almasry */
59584448c8eSJakob Koschel if (iter->to > last_accounted_offset)
59684448c8eSJakob Koschel last_accounted_offset = iter->to;
5970db9d74eSMina Almasry continue;
5980db9d74eSMina Almasry }
599d75c6af9SMina Almasry
6000db9d74eSMina Almasry /* When we find a region that starts beyond our range, we've
6010db9d74eSMina Almasry * finished.
6020db9d74eSMina Almasry */
60384448c8eSJakob Koschel if (iter->from >= t) {
60484448c8eSJakob Koschel rg = iter->link.prev;
605d75c6af9SMina Almasry break;
60684448c8eSJakob Koschel }
607d75c6af9SMina Almasry
60884448c8eSJakob Koschel /* Add an entry for last_accounted_offset -> iter->from, and
6090db9d74eSMina Almasry * update last_accounted_offset.
610d75c6af9SMina Almasry */
61184448c8eSJakob Koschel if (iter->from > last_accounted_offset)
61284448c8eSJakob Koschel add += hugetlb_resv_map_add(resv, iter->link.prev,
6132103cf9cSPeter Xu last_accounted_offset,
61484448c8eSJakob Koschel iter->from, h, h_cg,
6152103cf9cSPeter Xu regions_needed);
616d75c6af9SMina Almasry
61784448c8eSJakob Koschel last_accounted_offset = iter->to;
6180db9d74eSMina Almasry }
6190db9d74eSMina Almasry
6200db9d74eSMina Almasry /* Handle the case where our range extends beyond
6210db9d74eSMina Almasry * last_accounted_offset.
6220db9d74eSMina Almasry */
62384448c8eSJakob Koschel if (!rg)
62484448c8eSJakob Koschel rg = head->prev;
6252103cf9cSPeter Xu if (last_accounted_offset < t)
6262103cf9cSPeter Xu add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
6272103cf9cSPeter Xu t, h, h_cg, regions_needed);
6280db9d74eSMina Almasry
6290db9d74eSMina Almasry return add;
6300db9d74eSMina Almasry }
6310db9d74eSMina Almasry
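/*
 * Standalone user-space sketch (illustrative names) of the counting pass
 * of add_reservation_in_range() above: given existing regions sorted by
 * 'from', it sums the pages in [f, t) that are not yet covered; each gap
 * found corresponds to one file_region entry the real code would need.
 */
#include <assert.h>

struct demo_region { long from, to; };

static long demo_uncovered(const struct demo_region *rg, int nr, long f, long t)
{
	long last = f, add = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (rg[i].to <= f)
			continue;			/* entirely before the range */
		if (rg[i].from >= t)
			break;				/* past the range: done */
		if (rg[i].from > last)
			add += rg[i].from - last;	/* gap before this region */
		if (rg[i].to > last)
			last = rg[i].to;
	}
	if (last < t)
		add += t - last;			/* tail gap after the last region */
	return add;
}

int main(void)
{
	struct demo_region rg[] = { { 2, 4 }, { 6, 7 } };

	/* [0,10) minus [2,4) and [6,7): gaps [0,2), [4,6), [7,10) = 2 + 2 + 3 pages */
	assert(demo_uncovered(rg, 2, 0, 10) == 7);
	return 0;
}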
6320db9d74eSMina Almasry /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
6330db9d74eSMina Almasry */
6340db9d74eSMina Almasry static int allocate_file_region_entries(struct resv_map *resv,
6350db9d74eSMina Almasry int regions_needed)
6360db9d74eSMina Almasry __must_hold(&resv->lock)
6370db9d74eSMina Almasry {
63834665341SMiaohe Lin LIST_HEAD(allocated_regions);
6390db9d74eSMina Almasry int to_allocate = 0, i = 0;
6400db9d74eSMina Almasry struct file_region *trg = NULL, *rg = NULL;
6410db9d74eSMina Almasry
6420db9d74eSMina Almasry VM_BUG_ON(regions_needed < 0);
6430db9d74eSMina Almasry
6440db9d74eSMina Almasry /*
6450db9d74eSMina Almasry * Check for sufficient descriptors in the cache to accommodate
6460db9d74eSMina Almasry * the number of in progress add operations plus regions_needed.
6470db9d74eSMina Almasry *
6480db9d74eSMina Almasry * This is a while loop because when we drop the lock, some other call
6490db9d74eSMina Almasry * to region_add or region_del may have consumed some region_entries,
6500db9d74eSMina Almasry * so we keep looping here until we finally have enough entries for
6510db9d74eSMina Almasry * (adds_in_progress + regions_needed).
6520db9d74eSMina Almasry */
6530db9d74eSMina Almasry while (resv->region_cache_count <
6540db9d74eSMina Almasry (resv->adds_in_progress + regions_needed)) {
6550db9d74eSMina Almasry to_allocate = resv->adds_in_progress + regions_needed -
6560db9d74eSMina Almasry resv->region_cache_count;
6570db9d74eSMina Almasry
6580db9d74eSMina Almasry /* At this point, we should have enough entries in the cache
659f0953a1bSIngo Molnar * for all the existing adds_in_progress. We should only be
6600db9d74eSMina Almasry * needing to allocate for regions_needed.
6610db9d74eSMina Almasry */
6620db9d74eSMina Almasry VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
6630db9d74eSMina Almasry
6640db9d74eSMina Almasry spin_unlock(&resv->lock);
6650db9d74eSMina Almasry for (i = 0; i < to_allocate; i++) {
6660db9d74eSMina Almasry trg = kmalloc(sizeof(*trg), GFP_KERNEL);
6670db9d74eSMina Almasry if (!trg)
6680db9d74eSMina Almasry goto out_of_memory;
6690db9d74eSMina Almasry list_add(&trg->link, &allocated_regions);
6700db9d74eSMina Almasry }
6710db9d74eSMina Almasry
6720db9d74eSMina Almasry spin_lock(&resv->lock);
6730db9d74eSMina Almasry
674d3ec7b6eSWei Yang list_splice(&allocated_regions, &resv->region_cache);
675d3ec7b6eSWei Yang resv->region_cache_count += to_allocate;
6760db9d74eSMina Almasry }
6770db9d74eSMina Almasry
6780db9d74eSMina Almasry return 0;
6790db9d74eSMina Almasry
6800db9d74eSMina Almasry out_of_memory:
6810db9d74eSMina Almasry list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
682d75c6af9SMina Almasry list_del(&rg->link);
683d75c6af9SMina Almasry kfree(rg);
684d75c6af9SMina Almasry }
6850db9d74eSMina Almasry return -ENOMEM;
686d75c6af9SMina Almasry }
687d75c6af9SMina Almasry
6881dd308a7SMike Kravetz /*
6891dd308a7SMike Kravetz * Add the huge page range represented by [f, t) to the reserve
6900db9d74eSMina Almasry * map. Regions will be taken from the cache to fill in this range.
6910db9d74eSMina Almasry * Sufficient regions should exist in the cache due to the previous
6920db9d74eSMina Almasry * call to region_chg with the same range, but in some cases the cache will not
6930db9d74eSMina Almasry * have sufficient entries due to races with other code doing region_add or
6940db9d74eSMina Almasry * region_del. The extra needed entries will be allocated.
695cf3ad20bSMike Kravetz *
6960db9d74eSMina Almasry * regions_needed is the out value provided by a previous call to region_chg.
6970db9d74eSMina Almasry *
6980db9d74eSMina Almasry * Return the number of new huge pages added to the map. This number is greater
6990db9d74eSMina Almasry * than or equal to zero. If file_region entries needed to be allocated for
7007c8de358SEthon Paul * this operation and we were not able to allocate, it returns -ENOMEM.
7010db9d74eSMina Almasry * region_add of regions of length 1 never allocate file_regions and cannot
7020db9d74eSMina Almasry * fail; region_chg will always allocate at least 1 entry and a region_add for
7030db9d74eSMina Almasry * 1 page will only require at most 1 entry.
7041dd308a7SMike Kravetz */
7050db9d74eSMina Almasry static long region_add(struct resv_map *resv, long f, long t,
706075a61d0SMina Almasry long in_regions_needed, struct hstate *h,
707075a61d0SMina Almasry struct hugetlb_cgroup *h_cg)
70896822904SAndy Whitcroft {
7090db9d74eSMina Almasry long add = 0, actual_regions_needed = 0;
71096822904SAndy Whitcroft
7117b24d861SDavidlohr Bueso spin_lock(&resv->lock);
7120db9d74eSMina Almasry retry:
7130db9d74eSMina Almasry
7140db9d74eSMina Almasry /* Count how many regions are actually needed to execute this add. */
715972a3da3SWei Yang add_reservation_in_range(resv, f, t, NULL, NULL,
716972a3da3SWei Yang &actual_regions_needed);
71796822904SAndy Whitcroft
7185e911373SMike Kravetz /*
7190db9d74eSMina Almasry * Check for sufficient descriptors in the cache to accommodate
7200db9d74eSMina Almasry * this add operation. Note that actual_regions_needed may be greater
7210db9d74eSMina Almasry * than in_regions_needed, as the resv_map may have been modified since
7220db9d74eSMina Almasry * the region_chg call. In this case, we need to make sure that we
7230db9d74eSMina Almasry * allocate extra entries, such that we have enough for all the
7240db9d74eSMina Almasry * existing adds_in_progress, plus the excess needed for this
7250db9d74eSMina Almasry * operation.
7265e911373SMike Kravetz */
7270db9d74eSMina Almasry if (actual_regions_needed > in_regions_needed &&
7280db9d74eSMina Almasry resv->region_cache_count <
7290db9d74eSMina Almasry resv->adds_in_progress +
7300db9d74eSMina Almasry (actual_regions_needed - in_regions_needed)) {
7310db9d74eSMina Almasry /* region_add operation of range 1 should never need to
7320db9d74eSMina Almasry * allocate file_region entries.
7330db9d74eSMina Almasry */
7340db9d74eSMina Almasry VM_BUG_ON(t - f <= 1);
7355e911373SMike Kravetz
7360db9d74eSMina Almasry if (allocate_file_region_entries(
7370db9d74eSMina Almasry resv, actual_regions_needed - in_regions_needed)) {
7380db9d74eSMina Almasry return -ENOMEM;
7395e911373SMike Kravetz }
7405e911373SMike Kravetz
7410db9d74eSMina Almasry goto retry;
7420db9d74eSMina Almasry }
743cf3ad20bSMike Kravetz
744972a3da3SWei Yang add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
7450db9d74eSMina Almasry
7460db9d74eSMina Almasry resv->adds_in_progress -= in_regions_needed;
7470db9d74eSMina Almasry
7487b24d861SDavidlohr Bueso spin_unlock(&resv->lock);
749cf3ad20bSMike Kravetz return add;
75096822904SAndy Whitcroft }
75196822904SAndy Whitcroft
7521dd308a7SMike Kravetz /*
7531dd308a7SMike Kravetz * Examine the existing reserve map and determine how many
7541dd308a7SMike Kravetz * huge pages in the specified range [f, t) are NOT currently
7551dd308a7SMike Kravetz * represented. This routine is called before a subsequent
7561dd308a7SMike Kravetz * call to region_add that will actually modify the reserve
7571dd308a7SMike Kravetz * map to add the specified range [f, t). region_chg does
7581dd308a7SMike Kravetz * not change the number of huge pages represented by the
7590db9d74eSMina Almasry * map. A number of new file_region structures are added to the cache as
7600db9d74eSMina Almasry * placeholders for the subsequent region_add call to use. At least 1
7610db9d74eSMina Almasry * file_region structure is added.
7620db9d74eSMina Almasry *
7630db9d74eSMina Almasry * out_regions_needed is the number of regions added to the
7640db9d74eSMina Almasry * resv->adds_in_progress. This value needs to be provided to a follow up call
7650db9d74eSMina Almasry * to region_add or region_abort for proper accounting.
7665e911373SMike Kravetz *
7675e911373SMike Kravetz * Returns the number of huge pages that need to be added to the existing
7685e911373SMike Kravetz * reservation map for the range [f, t). This number is greater or equal to
7695e911373SMike Kravetz * zero. -ENOMEM is returned if a new file_region structure or cache entry
7705e911373SMike Kravetz * is needed and can not be allocated.
7711dd308a7SMike Kravetz */
7720db9d74eSMina Almasry static long region_chg(struct resv_map *resv, long f, long t,
7730db9d74eSMina Almasry long *out_regions_needed)
77496822904SAndy Whitcroft {
77596822904SAndy Whitcroft long chg = 0;
77696822904SAndy Whitcroft
7777b24d861SDavidlohr Bueso spin_lock(&resv->lock);
7785e911373SMike Kravetz
779972a3da3SWei Yang /* Count how many hugepages in this range are NOT represented. */
780075a61d0SMina Almasry chg = add_reservation_in_range(resv, f, t, NULL, NULL,
781972a3da3SWei Yang out_regions_needed);
7825e911373SMike Kravetz
7830db9d74eSMina Almasry if (*out_regions_needed == 0)
7840db9d74eSMina Almasry *out_regions_needed = 1;
7855e911373SMike Kravetz
7860db9d74eSMina Almasry if (allocate_file_region_entries(resv, *out_regions_needed))
7875e911373SMike Kravetz return -ENOMEM;
7885e911373SMike Kravetz
7890db9d74eSMina Almasry resv->adds_in_progress += *out_regions_needed;
79096822904SAndy Whitcroft
7917b24d861SDavidlohr Bueso spin_unlock(&resv->lock);
79296822904SAndy Whitcroft return chg;
79396822904SAndy Whitcroft }
79496822904SAndy Whitcroft
7951dd308a7SMike Kravetz /*
7965e911373SMike Kravetz * Abort the in progress add operation. The adds_in_progress field
7975e911373SMike Kravetz * of the resv_map keeps track of the operations in progress between
7985e911373SMike Kravetz * calls to region_chg and region_add. Operations are sometimes
7995e911373SMike Kravetz * aborted after the call to region_chg. In such cases, region_abort
8000db9d74eSMina Almasry * is called to decrement the adds_in_progress counter. regions_needed
8010db9d74eSMina Almasry * is the value returned by the region_chg call, it is used to decrement
8020db9d74eSMina Almasry * the adds_in_progress counter.
8035e911373SMike Kravetz *
8045e911373SMike Kravetz * NOTE: The range arguments [f, t) are not needed or used in this
8055e911373SMike Kravetz * routine. They are kept to make reading the calling code easier as
8065e911373SMike Kravetz * arguments will match the associated region_chg call.
8075e911373SMike Kravetz */
8080db9d74eSMina Almasry static void region_abort(struct resv_map *resv, long f, long t,
8090db9d74eSMina Almasry long regions_needed)
8105e911373SMike Kravetz {
8115e911373SMike Kravetz spin_lock(&resv->lock);
8125e911373SMike Kravetz VM_BUG_ON(!resv->region_cache_count);
8130db9d74eSMina Almasry resv->adds_in_progress -= regions_needed;
8145e911373SMike Kravetz spin_unlock(&resv->lock);
8155e911373SMike Kravetz }
8165e911373SMike Kravetz
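/*
 * Standalone user-space sketch (illustrative demo_* names) of the
 * two-phase protocol formed by region_chg(), region_add() and
 * region_abort() above: phase one reports the cost and records an
 * operation in progress (the real code also pre-allocates file_region
 * cache entries); phase two either commits the reservation or aborts,
 * and in both cases adds_in_progress drops back down.
 */
#include <assert.h>

struct demo_resv { long adds_in_progress, reserved; };

static long demo_chg(struct demo_resv *r, long needed)
{
	r->adds_in_progress += needed;	/* room reserved for the later add */
	return needed;
}

static void demo_add(struct demo_resv *r, long pages, long needed)
{
	r->reserved += pages;		/* commit the reservation */
	r->adds_in_progress -= needed;
}

static void demo_abort(struct demo_resv *r, long needed)
{
	r->adds_in_progress -= needed;	/* back out without touching the map */
}

int main(void)
{
	struct demo_resv r = { 0, 0 };
	long needed = demo_chg(&r, 1);

	demo_add(&r, 1, needed);	/* on failure a caller would use demo_abort(&r, needed) */
	assert(r.adds_in_progress == 0 && r.reserved == 1);
	return 0;
}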
8175e911373SMike Kravetz /*
818feba16e2SMike Kravetz * Delete the specified range [f, t) from the reserve map. If the
819feba16e2SMike Kravetz * t parameter is LONG_MAX, this indicates that ALL regions after f
820feba16e2SMike Kravetz * should be deleted. Locate the regions which intersect [f, t)
821feba16e2SMike Kravetz * and either trim, delete or split the existing regions.
822feba16e2SMike Kravetz *
823feba16e2SMike Kravetz * Returns the number of huge pages deleted from the reserve map.
824feba16e2SMike Kravetz * In the normal case, the return value is zero or more. In the
825feba16e2SMike Kravetz * case where a region must be split, a new region descriptor must
826feba16e2SMike Kravetz * be allocated. If the allocation fails, -ENOMEM will be returned.
827feba16e2SMike Kravetz * NOTE: If the parameter t == LONG_MAX, then we will never split
828feba16e2SMike Kravetz * a region and possibly return -ENOMEM. Callers specifying
829feba16e2SMike Kravetz * t == LONG_MAX do not need to check for -ENOMEM error.
8301dd308a7SMike Kravetz */
831feba16e2SMike Kravetz static long region_del(struct resv_map *resv, long f, long t)
83296822904SAndy Whitcroft {
8331406ec9bSJoonsoo Kim struct list_head *head = &resv->regions;
83496822904SAndy Whitcroft struct file_region *rg, *trg;
835feba16e2SMike Kravetz struct file_region *nrg = NULL;
836feba16e2SMike Kravetz long del = 0;
83796822904SAndy Whitcroft
838feba16e2SMike Kravetz retry:
8397b24d861SDavidlohr Bueso spin_lock(&resv->lock);
840feba16e2SMike Kravetz list_for_each_entry_safe(rg, trg, head, link) {
841dbe409e4SMike Kravetz /*
842dbe409e4SMike Kravetz * Skip regions before the range to be deleted. file_region
843dbe409e4SMike Kravetz * ranges are normally of the form [from, to). However, there
844dbe409e4SMike Kravetz * may be a "placeholder" entry in the map which is of the form
845dbe409e4SMike Kravetz * (from, to) with from == to. Check for placeholder entries
846dbe409e4SMike Kravetz * at the beginning of the range to be deleted.
847dbe409e4SMike Kravetz */
848dbe409e4SMike Kravetz if (rg->to <= f && (rg->to != rg->from || rg->to != f))
849feba16e2SMike Kravetz continue;
850dbe409e4SMike Kravetz
851feba16e2SMike Kravetz if (rg->from >= t)
85296822904SAndy Whitcroft break;
85396822904SAndy Whitcroft
854feba16e2SMike Kravetz if (f > rg->from && t < rg->to) { /* Must split region */
855feba16e2SMike Kravetz /*
856feba16e2SMike Kravetz * Check for an entry in the cache before dropping
857feba16e2SMike Kravetz * lock and attempting allocation.
858feba16e2SMike Kravetz */
859feba16e2SMike Kravetz if (!nrg &&
860feba16e2SMike Kravetz resv->region_cache_count > resv->adds_in_progress) {
861feba16e2SMike Kravetz nrg = list_first_entry(&resv->region_cache,
862feba16e2SMike Kravetz struct file_region,
863feba16e2SMike Kravetz link);
864feba16e2SMike Kravetz list_del(&nrg->link);
865feba16e2SMike Kravetz resv->region_cache_count--;
86696822904SAndy Whitcroft }
86796822904SAndy Whitcroft
868feba16e2SMike Kravetz if (!nrg) {
869feba16e2SMike Kravetz spin_unlock(&resv->lock);
870feba16e2SMike Kravetz nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
871feba16e2SMike Kravetz if (!nrg)
872feba16e2SMike Kravetz return -ENOMEM;
873feba16e2SMike Kravetz goto retry;
874feba16e2SMike Kravetz }
875feba16e2SMike Kravetz
876feba16e2SMike Kravetz del += t - f;
87779aa925bSMike Kravetz hugetlb_cgroup_uncharge_file_region(
878d85aecf2SMiaohe Lin resv, rg, t - f, false);
879feba16e2SMike Kravetz
880feba16e2SMike Kravetz /* New entry for end of split region */
881feba16e2SMike Kravetz nrg->from = t;
882feba16e2SMike Kravetz nrg->to = rg->to;
883075a61d0SMina Almasry
884075a61d0SMina Almasry copy_hugetlb_cgroup_uncharge_info(nrg, rg);
885075a61d0SMina Almasry
886feba16e2SMike Kravetz INIT_LIST_HEAD(&nrg->link);
887feba16e2SMike Kravetz
888feba16e2SMike Kravetz /* Original entry is trimmed */
889feba16e2SMike Kravetz rg->to = f;
890feba16e2SMike Kravetz
891feba16e2SMike Kravetz list_add(&nrg->link, &rg->link);
892feba16e2SMike Kravetz nrg = NULL;
89396822904SAndy Whitcroft break;
894feba16e2SMike Kravetz }
895feba16e2SMike Kravetz
896feba16e2SMike Kravetz if (f <= rg->from && t >= rg->to) { /* Remove entire region */
897feba16e2SMike Kravetz del += rg->to - rg->from;
898075a61d0SMina Almasry hugetlb_cgroup_uncharge_file_region(resv, rg,
899d85aecf2SMiaohe Lin rg->to - rg->from, true);
90096822904SAndy Whitcroft list_del(&rg->link);
90196822904SAndy Whitcroft kfree(rg);
902feba16e2SMike Kravetz continue;
90396822904SAndy Whitcroft }
9047b24d861SDavidlohr Bueso
905feba16e2SMike Kravetz if (f <= rg->from) { /* Trim beginning of region */
906075a61d0SMina Almasry hugetlb_cgroup_uncharge_file_region(resv, rg,
907d85aecf2SMiaohe Lin t - rg->from, false);
908075a61d0SMina Almasry
90979aa925bSMike Kravetz del += t - rg->from;
91079aa925bSMike Kravetz rg->from = t;
91179aa925bSMike Kravetz } else { /* Trim end of region */
912075a61d0SMina Almasry hugetlb_cgroup_uncharge_file_region(resv, rg,
913d85aecf2SMiaohe Lin rg->to - f, false);
91479aa925bSMike Kravetz
91579aa925bSMike Kravetz del += rg->to - f;
91679aa925bSMike Kravetz rg->to = f;
917feba16e2SMike Kravetz }
918feba16e2SMike Kravetz }
919feba16e2SMike Kravetz
9207b24d861SDavidlohr Bueso spin_unlock(&resv->lock);
921feba16e2SMike Kravetz kfree(nrg);
922feba16e2SMike Kravetz return del;
92396822904SAndy Whitcroft }
92496822904SAndy Whitcroft
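/*
 * Standalone user-space sketch (illustrative names) of the split case in
 * region_del() above: deleting [f, t) from the middle of an existing
 * [from, to) entry trims the original down to the head [from, f) and
 * creates a new entry for the tail [t, to), which is why region_del()
 * may have to allocate and can return -ENOMEM.
 */
#include <assert.h>

struct demo_region { long from, to; };

static long demo_split(struct demo_region *rg, struct demo_region *tail, long f, long t)
{
	tail->from = t;			/* tail piece survives the delete */
	tail->to = rg->to;
	rg->to = f;			/* original keeps only the head piece */
	return t - f;			/* pages removed from the reserve map */
}

int main(void)
{
	struct demo_region rg = { 0, 10 }, tail;

	assert(demo_split(&rg, &tail, 3, 7) == 4);
	assert(rg.from == 0 && rg.to == 3);
	assert(tail.from == 7 && tail.to == 10);
	return 0;
}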
9251dd308a7SMike Kravetz /*
926b5cec28dSMike Kravetz * A rare out of memory error was encountered which prevented removal of
927b5cec28dSMike Kravetz * the reserve map region for a page. The huge page itself was freed
928b5cec28dSMike Kravetz * and removed from the page cache. This routine will adjust the subpool
929b5cec28dSMike Kravetz * usage count, and the global reserve count if needed. By incrementing
930b5cec28dSMike Kravetz * these counts, the reserve map entry which could not be deleted will
931b5cec28dSMike Kravetz * appear as a "reserved" entry instead of simply dangling with incorrect
932b5cec28dSMike Kravetz * counts.
933b5cec28dSMike Kravetz */
93472e2936cSzhong jiang void hugetlb_fix_reserve_counts(struct inode *inode)
935b5cec28dSMike Kravetz {
936b5cec28dSMike Kravetz struct hugepage_subpool *spool = subpool_inode(inode);
937b5cec28dSMike Kravetz long rsv_adjust;
938da56388cSMiaohe Lin bool reserved = false;
939b5cec28dSMike Kravetz
940b5cec28dSMike Kravetz rsv_adjust = hugepage_subpool_get_pages(spool, 1);
941da56388cSMiaohe Lin if (rsv_adjust > 0) {
942b5cec28dSMike Kravetz struct hstate *h = hstate_inode(inode);
943b5cec28dSMike Kravetz
944da56388cSMiaohe Lin if (!hugetlb_acct_memory(h, 1))
945da56388cSMiaohe Lin reserved = true;
946da56388cSMiaohe Lin } else if (!rsv_adjust) {
947da56388cSMiaohe Lin reserved = true;
948b5cec28dSMike Kravetz }
949da56388cSMiaohe Lin
950da56388cSMiaohe Lin if (!reserved)
951da56388cSMiaohe Lin pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
952b5cec28dSMike Kravetz }
953b5cec28dSMike Kravetz
954b5cec28dSMike Kravetz /*
9551dd308a7SMike Kravetz * Count and return the number of huge pages in the reserve map
9561dd308a7SMike Kravetz * that intersect with the range [f, t).
9571dd308a7SMike Kravetz */
9581406ec9bSJoonsoo Kim static long region_count(struct resv_map *resv, long f, long t)
95984afd99bSAndy Whitcroft {
9601406ec9bSJoonsoo Kim struct list_head *head = &resv->regions;
96184afd99bSAndy Whitcroft struct file_region *rg;
96284afd99bSAndy Whitcroft long chg = 0;
96384afd99bSAndy Whitcroft
9647b24d861SDavidlohr Bueso spin_lock(&resv->lock);
96584afd99bSAndy Whitcroft /* Locate each segment we overlap with, and count that overlap. */
96684afd99bSAndy Whitcroft list_for_each_entry(rg, head, link) {
967f2135a4aSWang Sheng-Hui long seg_from;
968f2135a4aSWang Sheng-Hui long seg_to;
96984afd99bSAndy Whitcroft
97084afd99bSAndy Whitcroft if (rg->to <= f)
97184afd99bSAndy Whitcroft continue;
97284afd99bSAndy Whitcroft if (rg->from >= t)
97384afd99bSAndy Whitcroft break;
97484afd99bSAndy Whitcroft
97584afd99bSAndy Whitcroft seg_from = max(rg->from, f);
97684afd99bSAndy Whitcroft seg_to = min(rg->to, t);
97784afd99bSAndy Whitcroft
97884afd99bSAndy Whitcroft chg += seg_to - seg_from;
97984afd99bSAndy Whitcroft }
9807b24d861SDavidlohr Bueso spin_unlock(&resv->lock);
98184afd99bSAndy Whitcroft
98284afd99bSAndy Whitcroft return chg;
98384afd99bSAndy Whitcroft }
98484afd99bSAndy Whitcroft
98596822904SAndy Whitcroft /*
986e7c4b0bfSAndy Whitcroft * Convert the address within this vma to the page offset within
987e7c4b0bfSAndy Whitcroft * the mapping, in pagecache page units; huge pages here.
988e7c4b0bfSAndy Whitcroft */
989a5516438SAndi Kleen static pgoff_t vma_hugecache_offset(struct hstate *h,
990a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long address)
991e7c4b0bfSAndy Whitcroft {
992a5516438SAndi Kleen return ((address - vma->vm_start) >> huge_page_shift(h)) +
993a5516438SAndi Kleen (vma->vm_pgoff >> huge_page_order(h));
994e7c4b0bfSAndy Whitcroft }
995e7c4b0bfSAndy Whitcroft
9960fe6e20bSNaoya Horiguchi pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
9970fe6e20bSNaoya Horiguchi unsigned long address)
9980fe6e20bSNaoya Horiguchi {
9990fe6e20bSNaoya Horiguchi return vma_hugecache_offset(hstate_vma(vma), vma, address);
10000fe6e20bSNaoya Horiguchi }
1001dee41079SDan Williams EXPORT_SYMBOL_GPL(linear_hugepage_index);
10020fe6e20bSNaoya Horiguchi
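/*
 * Standalone user-space sketch of the index computation done by
 * vma_hugecache_offset() above, assuming 2 MB huge pages on a 4 KB
 * base-page kernel (huge_page_shift = 21, huge_page_order = 9).  The
 * result is the offset into the mapping in huge-page units.
 */
#include <assert.h>

#define DEMO_HPAGE_SHIFT	21			/* 2 MB huge page */
#define DEMO_HPAGE_ORDER	(DEMO_HPAGE_SHIFT - 12)	/* in 4 KB base pages */

static unsigned long demo_hugecache_offset(unsigned long vm_start,
					    unsigned long vm_pgoff,
					    unsigned long address)
{
	return ((address - vm_start) >> DEMO_HPAGE_SHIFT) +
	       (vm_pgoff >> DEMO_HPAGE_ORDER);
}

int main(void)
{
	/* Mapping begins 2 MB (512 base pages) into the file. */
	unsigned long vm_start = 0x40000000UL, vm_pgoff = 512;

	/* 4 MB past vm_start = huge page 2 of the vma = index 3 within the file. */
	assert(demo_hugecache_offset(vm_start, vm_pgoff, vm_start + (4UL << 20)) == 3);
	return 0;
}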
10038cfd014eSMatthew Wilcox (Oracle) /**
10048cfd014eSMatthew Wilcox (Oracle) * vma_kernel_pagesize - Page size granularity for this VMA.
10058cfd014eSMatthew Wilcox (Oracle) * @vma: The user mapping.
10068cfd014eSMatthew Wilcox (Oracle) *
10078cfd014eSMatthew Wilcox (Oracle) * Folios in this VMA will be aligned to, and at least as large as, the
10088cfd014eSMatthew Wilcox (Oracle) * number of bytes returned by this function.
10098cfd014eSMatthew Wilcox (Oracle) *
10108cfd014eSMatthew Wilcox (Oracle) * Return: The default size of the folios allocated when backing a VMA.
101108fba699SMel Gorman */
101208fba699SMel Gorman unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
101308fba699SMel Gorman {
101405ea8860SDan Williams if (vma->vm_ops && vma->vm_ops->pagesize)
101505ea8860SDan Williams return vma->vm_ops->pagesize(vma);
101608fba699SMel Gorman return PAGE_SIZE;
101708fba699SMel Gorman }
1018f340ca0fSJoerg Roedel EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
101908fba699SMel Gorman
102008fba699SMel Gorman /*
10213340289dSMel Gorman * Return the page size being used by the MMU to back a VMA. In the majority
10223340289dSMel Gorman * of cases, the page size used by the kernel matches the MMU size. On
102309135cc5SDan Williams * architectures where it differs, an architecture-specific 'strong'
102409135cc5SDan Williams * version of this symbol is required.
10253340289dSMel Gorman */
102609135cc5SDan Williams __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
10273340289dSMel Gorman {
10283340289dSMel Gorman return vma_kernel_pagesize(vma);
10293340289dSMel Gorman }
10303340289dSMel Gorman
10313340289dSMel Gorman /*
103284afd99bSAndy Whitcroft * Flags for MAP_PRIVATE reservations. These are stored in the bottom
103384afd99bSAndy Whitcroft * bits of the reservation map pointer, which are always clear due to
103484afd99bSAndy Whitcroft * alignment.
103584afd99bSAndy Whitcroft */
103684afd99bSAndy Whitcroft #define HPAGE_RESV_OWNER (1UL << 0)
103784afd99bSAndy Whitcroft #define HPAGE_RESV_UNMAPPED (1UL << 1)
103804f2cbe3SMel Gorman #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
103984afd99bSAndy Whitcroft
1040a1e78772SMel Gorman /*
1041a1e78772SMel Gorman * These helpers are used to track how many pages are reserved for
1042a1e78772SMel Gorman * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
1043a1e78772SMel Gorman * is guaranteed to have their future faults succeed.
1044a1e78772SMel Gorman *
10458d9bfb26SMike Kravetz * With the exception of hugetlb_dup_vma_private() which is called at fork(),
1046a1e78772SMel Gorman * the reserve counters are updated with the hugetlb_lock held. It is safe
1047a1e78772SMel Gorman * to reset the VMA at fork() time as it is not in use yet and there is no
1048a1e78772SMel Gorman * chance of the global counters getting corrupted as a result of the values.
104984afd99bSAndy Whitcroft *
105084afd99bSAndy Whitcroft * The private mapping reservation is represented in a subtly different
105184afd99bSAndy Whitcroft * manner to a shared mapping. A shared mapping has a region map associated
105284afd99bSAndy Whitcroft * with the underlying file, this region map represents the backing file
105384afd99bSAndy Whitcroft * pages which have ever had a reservation assigned; this persists even
105484afd99bSAndy Whitcroft * after the page is instantiated. A private mapping has a region map
105584afd99bSAndy Whitcroft * associated with the original mmap which is attached to all VMAs which
105684afd99bSAndy Whitcroft * reference it, this region map represents those offsets which have consumed
105784afd99bSAndy Whitcroft * reservation, i.e. where pages have been instantiated.
1058a1e78772SMel Gorman */
1059e7c4b0bfSAndy Whitcroft static unsigned long get_vma_private_data(struct vm_area_struct *vma)
1060e7c4b0bfSAndy Whitcroft {
1061e7c4b0bfSAndy Whitcroft return (unsigned long)vma->vm_private_data;
1062e7c4b0bfSAndy Whitcroft }
1063e7c4b0bfSAndy Whitcroft
1064e7c4b0bfSAndy Whitcroft static void set_vma_private_data(struct vm_area_struct *vma,
1065e7c4b0bfSAndy Whitcroft unsigned long value)
1066e7c4b0bfSAndy Whitcroft {
1067e7c4b0bfSAndy Whitcroft vma->vm_private_data = (void *)value;
1068e7c4b0bfSAndy Whitcroft }
1069e7c4b0bfSAndy Whitcroft
1070e9fe92aeSMina Almasry static void
1071e9fe92aeSMina Almasry resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
1072e9fe92aeSMina Almasry struct hugetlb_cgroup *h_cg,
1073e9fe92aeSMina Almasry struct hstate *h)
1074e9fe92aeSMina Almasry {
1075e9fe92aeSMina Almasry #ifdef CONFIG_CGROUP_HUGETLB
1076e9fe92aeSMina Almasry if (!h_cg || !h) {
1077e9fe92aeSMina Almasry resv_map->reservation_counter = NULL;
1078e9fe92aeSMina Almasry resv_map->pages_per_hpage = 0;
1079e9fe92aeSMina Almasry resv_map->css = NULL;
1080e9fe92aeSMina Almasry } else {
1081e9fe92aeSMina Almasry resv_map->reservation_counter =
1082e9fe92aeSMina Almasry &h_cg->rsvd_hugepage[hstate_index(h)];
1083e9fe92aeSMina Almasry resv_map->pages_per_hpage = pages_per_huge_page(h);
1084e9fe92aeSMina Almasry resv_map->css = &h_cg->css;
1085e9fe92aeSMina Almasry }
1086e9fe92aeSMina Almasry #endif
1087e9fe92aeSMina Almasry }
1088e9fe92aeSMina Almasry
10899119a41eSJoonsoo Kim struct resv_map *resv_map_alloc(void)
109084afd99bSAndy Whitcroft {
109184afd99bSAndy Whitcroft struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
10925e911373SMike Kravetz struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
10935e911373SMike Kravetz
10945e911373SMike Kravetz if (!resv_map || !rg) {
10955e911373SMike Kravetz kfree(resv_map);
10965e911373SMike Kravetz kfree(rg);
109784afd99bSAndy Whitcroft return NULL;
10985e911373SMike Kravetz }
109984afd99bSAndy Whitcroft
110084afd99bSAndy Whitcroft kref_init(&resv_map->refs);
11017b24d861SDavidlohr Bueso spin_lock_init(&resv_map->lock);
110284afd99bSAndy Whitcroft INIT_LIST_HEAD(&resv_map->regions);
1103bf491692SRik van Riel init_rwsem(&resv_map->rw_sema);
110484afd99bSAndy Whitcroft
11055e911373SMike Kravetz resv_map->adds_in_progress = 0;
1106e9fe92aeSMina Almasry /*
1107e9fe92aeSMina Almasry * Initialize these to 0. On shared mappings, 0's here indicate these
1108e9fe92aeSMina Almasry * fields don't do cgroup accounting. On private mappings, these will be
1109e9fe92aeSMina Almasry * re-initialized to the proper values, to indicate that hugetlb cgroup
1110e9fe92aeSMina Almasry * reservations are to be un-charged from here.
1111e9fe92aeSMina Almasry */
1112e9fe92aeSMina Almasry resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
11135e911373SMike Kravetz
11145e911373SMike Kravetz INIT_LIST_HEAD(&resv_map->region_cache);
11155e911373SMike Kravetz list_add(&rg->link, &resv_map->region_cache);
11165e911373SMike Kravetz resv_map->region_cache_count = 1;
11175e911373SMike Kravetz
111884afd99bSAndy Whitcroft return resv_map;
111984afd99bSAndy Whitcroft }
112084afd99bSAndy Whitcroft
11219119a41eSJoonsoo Kim void resv_map_release(struct kref *ref)
112284afd99bSAndy Whitcroft {
112384afd99bSAndy Whitcroft struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
11245e911373SMike Kravetz struct list_head *head = &resv_map->region_cache;
11255e911373SMike Kravetz struct file_region *rg, *trg;
112684afd99bSAndy Whitcroft
112784afd99bSAndy Whitcroft /* Clear out any active regions before we release the map. */
1128feba16e2SMike Kravetz region_del(resv_map, 0, LONG_MAX);
11295e911373SMike Kravetz
11305e911373SMike Kravetz /* ... and any entries left in the cache */
11315e911373SMike Kravetz list_for_each_entry_safe(rg, trg, head, link) {
11325e911373SMike Kravetz list_del(&rg->link);
11335e911373SMike Kravetz kfree(rg);
11345e911373SMike Kravetz }
11355e911373SMike Kravetz
11365e911373SMike Kravetz VM_BUG_ON(resv_map->adds_in_progress);
11375e911373SMike Kravetz
113884afd99bSAndy Whitcroft kfree(resv_map);
113984afd99bSAndy Whitcroft }
114084afd99bSAndy Whitcroft
11414e35f483SJoonsoo Kim static inline struct resv_map *inode_resv_map(struct inode *inode)
11424e35f483SJoonsoo Kim {
1143f27a5136SMike Kravetz /*
1144f27a5136SMike Kravetz * At inode evict time, i_mapping may not point to the original
1145f27a5136SMike Kravetz * address space within the inode. This original address space
1146f27a5136SMike Kravetz * contains the pointer to the resv_map. So, always use the
1147f27a5136SMike Kravetz * address space embedded within the inode.
1148f27a5136SMike Kravetz * The VERY common case is inode->mapping == &inode->i_data, but
1149f27a5136SMike Kravetz * this may not be true for device special inodes.
1150f27a5136SMike Kravetz */
1151f27a5136SMike Kravetz return (struct resv_map *)(&inode->i_data)->private_data;
11524e35f483SJoonsoo Kim }
11534e35f483SJoonsoo Kim
115484afd99bSAndy Whitcroft static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
1155a1e78772SMel Gorman {
115681d1b09cSSasha Levin VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
11574e35f483SJoonsoo Kim if (vma->vm_flags & VM_MAYSHARE) {
11584e35f483SJoonsoo Kim struct address_space *mapping = vma->vm_file->f_mapping;
11594e35f483SJoonsoo Kim struct inode *inode = mapping->host;
11604e35f483SJoonsoo Kim
11614e35f483SJoonsoo Kim return inode_resv_map(inode);
11624e35f483SJoonsoo Kim
11634e35f483SJoonsoo Kim } else {
116484afd99bSAndy Whitcroft return (struct resv_map *)(get_vma_private_data(vma) &
116584afd99bSAndy Whitcroft ~HPAGE_RESV_MASK);
11664e35f483SJoonsoo Kim }
1167a1e78772SMel Gorman }
1168a1e78772SMel Gorman
116984afd99bSAndy Whitcroft static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
1170a1e78772SMel Gorman {
117181d1b09cSSasha Levin VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
117281d1b09cSSasha Levin VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
1173a1e78772SMel Gorman
117492fe9dcbSRik van Riel set_vma_private_data(vma, (unsigned long)map);
117504f2cbe3SMel Gorman }
117604f2cbe3SMel Gorman
117704f2cbe3SMel Gorman static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
117804f2cbe3SMel Gorman {
117981d1b09cSSasha Levin VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
118081d1b09cSSasha Levin VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
1181e7c4b0bfSAndy Whitcroft
1182e7c4b0bfSAndy Whitcroft set_vma_private_data(vma, get_vma_private_data(vma) | flags);
118304f2cbe3SMel Gorman }
118404f2cbe3SMel Gorman
118504f2cbe3SMel Gorman static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
118604f2cbe3SMel Gorman {
118781d1b09cSSasha Levin VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1188e7c4b0bfSAndy Whitcroft
1189e7c4b0bfSAndy Whitcroft return (get_vma_private_data(vma) & flag) != 0;
1190a1e78772SMel Gorman }
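/*
 * Sketch of how the low pointer bits are used (hypothetical values): after
 *
 *	set_vma_resv_map(vma, map);
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 *
 * vma->vm_private_data holds ((unsigned long)map | HPAGE_RESV_OWNER).
 * vma_resv_map() masks with ~HPAGE_RESV_MASK to recover 'map', and
 * is_vma_resv_set(vma, HPAGE_RESV_OWNER) tests the flag bit.  This relies
 * on the resv_map allocation being aligned so the two low bits are always
 * clear, as noted above.
 */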
1191a1e78772SMel Gorman
1192512b420aSMike Kravetz bool __vma_private_lock(struct vm_area_struct *vma)
1193512b420aSMike Kravetz {
1194512b420aSMike Kravetz return !(vma->vm_flags & VM_MAYSHARE) &&
1195512b420aSMike Kravetz get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
1196512b420aSMike Kravetz is_vma_resv_set(vma, HPAGE_RESV_OWNER);
1197512b420aSMike Kravetz }
1198512b420aSMike Kravetz
11998d9bfb26SMike Kravetz void hugetlb_dup_vma_private(struct vm_area_struct *vma)
1200a1e78772SMel Gorman {
120181d1b09cSSasha Levin VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
12028d9bfb26SMike Kravetz /*
12038d9bfb26SMike Kravetz * Clear vm_private_data
1204612b8a31SMike Kravetz * - For shared mappings this is a per-vma semaphore that may be
1205612b8a31SMike Kravetz * allocated in a subsequent call to hugetlb_vm_op_open.
1206612b8a31SMike Kravetz * Before clearing, make sure pointer is not associated with vma
1207612b8a31SMike Kravetz * as this will leak the structure. This is the case when called
1208612b8a31SMike Kravetz * via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
1209612b8a31SMike Kravetz * been called to allocate a new structure.
12108d9bfb26SMike Kravetz * - For MAP_PRIVATE mappings, this is the reserve map which does
12118d9bfb26SMike Kravetz * not apply to children. Faults generated by the children are
12128d9bfb26SMike Kravetz * not guaranteed to succeed, even if read-only.
12138d9bfb26SMike Kravetz */
1214612b8a31SMike Kravetz if (vma->vm_flags & VM_MAYSHARE) {
1215612b8a31SMike Kravetz struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
1216612b8a31SMike Kravetz
1217612b8a31SMike Kravetz if (vma_lock && vma_lock->vma != vma)
1218612b8a31SMike Kravetz vma->vm_private_data = NULL;
1219612b8a31SMike Kravetz } else
1220612b8a31SMike Kravetz vma->vm_private_data = NULL;
1221a1e78772SMel Gorman }
1222a1e78772SMel Gorman
1223550a7d60SMina Almasry /*
1224550a7d60SMina Almasry * Reset and decrement one ref on hugepage private reservation.
12258651a137SLorenzo Stoakes * Called with mm->mmap_lock writer semaphore held.
1226550a7d60SMina Almasry * This function should be only used by move_vma() and operate on
1227550a7d60SMina Almasry * same sized vma. It should never come here with last ref on the
1228550a7d60SMina Almasry * reservation.
1229550a7d60SMina Almasry */
1230550a7d60SMina Almasry void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
1231550a7d60SMina Almasry {
1232550a7d60SMina Almasry /*
1233550a7d60SMina Almasry * Clear the old hugetlb private page reservation.
1234550a7d60SMina Almasry * It has already been transferred to new_vma.
1235550a7d60SMina Almasry *
1236550a7d60SMina Almasry * During a mremap() operation of a hugetlb vma we call move_vma()
1237550a7d60SMina Almasry * which copies vma into new_vma and unmaps vma. After the copy
1238550a7d60SMina Almasry * operation both new_vma and vma share a reference to the resv_map
1239550a7d60SMina Almasry * struct, and at that point vma is about to be unmapped. We don't
1240550a7d60SMina Almasry * want to return the reservation to the pool at unmap of vma because
1241550a7d60SMina Almasry * the reservation still lives on in new_vma, so simply decrement the
1242550a7d60SMina Almasry * ref here and remove the resv_map reference from this vma.
1243550a7d60SMina Almasry */
1244550a7d60SMina Almasry struct resv_map *reservations = vma_resv_map(vma);
1245550a7d60SMina Almasry
1246afe041c2SBui Quang Minh if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1247afe041c2SBui Quang Minh resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
1248550a7d60SMina Almasry kref_put(&reservations->refs, resv_map_release);
1249afe041c2SBui Quang Minh }
1250550a7d60SMina Almasry
12518d9bfb26SMike Kravetz hugetlb_dup_vma_private(vma);
1252550a7d60SMina Almasry }
1253550a7d60SMina Almasry
1254a1e78772SMel Gorman /* Returns true if the VMA has associated reserve pages */
1255559ec2f8SNicholas Krause static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
1256a1e78772SMel Gorman {
1257af0ed73eSJoonsoo Kim if (vma->vm_flags & VM_NORESERVE) {
1258af0ed73eSJoonsoo Kim /*
1259af0ed73eSJoonsoo Kim * This address is already reserved by another process (chg == 0),
1260af0ed73eSJoonsoo Kim * so we should decrement the reserved count. Without decrementing,
1261af0ed73eSJoonsoo Kim * the reserve count remains after releasing the inode, because this
1262af0ed73eSJoonsoo Kim * allocated page will go into the page cache and is regarded as
1263af0ed73eSJoonsoo Kim * coming from the reserved pool in the releasing step. Currently, we
1264af0ed73eSJoonsoo Kim * don't have any other solution to deal with this situation
1265af0ed73eSJoonsoo Kim * properly, so we add a workaround here.
1266af0ed73eSJoonsoo Kim */
1267af0ed73eSJoonsoo Kim if (vma->vm_flags & VM_MAYSHARE && chg == 0)
1268559ec2f8SNicholas Krause return true;
1269af0ed73eSJoonsoo Kim else
1270559ec2f8SNicholas Krause return false;
1271af0ed73eSJoonsoo Kim }
1272a63884e9SJoonsoo Kim
1273a63884e9SJoonsoo Kim /* Shared mappings always use reserves */
12741fb1b0e9SMike Kravetz if (vma->vm_flags & VM_MAYSHARE) {
12751fb1b0e9SMike Kravetz /*
12761fb1b0e9SMike Kravetz * We know VM_NORESERVE is not set. Therefore, there SHOULD
12771fb1b0e9SMike Kravetz * be a region map for all pages. The only situation where
12781fb1b0e9SMike Kravetz * there is no region map is if a hole was punched via
12797c8de358SEthon Paul * fallocate. In this case, there really are no reserves to
12801fb1b0e9SMike Kravetz * use. This situation is indicated if chg != 0.
12811fb1b0e9SMike Kravetz */
12821fb1b0e9SMike Kravetz if (chg)
12831fb1b0e9SMike Kravetz return false;
12841fb1b0e9SMike Kravetz else
1285559ec2f8SNicholas Krause return true;
12861fb1b0e9SMike Kravetz }
1287a63884e9SJoonsoo Kim
1288a63884e9SJoonsoo Kim /*
1289a63884e9SJoonsoo Kim * Only the process that called mmap() has reserves for
1290a63884e9SJoonsoo Kim * private mappings.
1291a63884e9SJoonsoo Kim */
129267961f9dSMike Kravetz if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
129367961f9dSMike Kravetz /*
129467961f9dSMike Kravetz * Like the shared case above, a hole punch or truncate
129567961f9dSMike Kravetz * could have been performed on the private mapping.
129667961f9dSMike Kravetz * Examine the value of chg to determine if reserves
129767961f9dSMike Kravetz * actually exist or were previously consumed.
129867961f9dSMike Kravetz * Very Subtle - The value of chg comes from a previous
129967961f9dSMike Kravetz * call to vma_needs_reserves(). The reserve map for
130067961f9dSMike Kravetz * private mappings has different (opposite) semantics
130167961f9dSMike Kravetz * than that of shared mappings. vma_needs_reserves()
130267961f9dSMike Kravetz * has already taken this difference in semantics into
130367961f9dSMike Kravetz * account. Therefore, the meaning of chg is the same
130467961f9dSMike Kravetz * as in the shared case above. Code could easily be
130567961f9dSMike Kravetz * combined, but keeping it separate draws attention to
130667961f9dSMike Kravetz * subtle differences.
130767961f9dSMike Kravetz */
130867961f9dSMike Kravetz if (chg)
130967961f9dSMike Kravetz return false;
131067961f9dSMike Kravetz else
1311559ec2f8SNicholas Krause return true;
131267961f9dSMike Kravetz }
1313a63884e9SJoonsoo Kim
1314559ec2f8SNicholas Krause return false;
1315a1e78772SMel Gorman }
1316a1e78772SMel Gorman
1317240d67a8SSidhartha Kumar static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
13181da177e4SLinus Torvalds {
1319240d67a8SSidhartha Kumar int nid = folio_nid(folio);
13209487ca60SMike Kravetz
13219487ca60SMike Kravetz lockdep_assert_held(&hugetlb_lock);
1322240d67a8SSidhartha Kumar VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1323b65a4edaSMike Kravetz
1324240d67a8SSidhartha Kumar list_move(&folio->lru, &h->hugepage_freelists[nid]);
1325a5516438SAndi Kleen h->free_huge_pages++;
1326a5516438SAndi Kleen h->free_huge_pages_node[nid]++;
1327240d67a8SSidhartha Kumar folio_set_hugetlb_freed(folio);
13281da177e4SLinus Torvalds }
13291da177e4SLinus Torvalds
1330a36f1e90SSidhartha Kumar static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
1331a36f1e90SSidhartha Kumar int nid)
1332bf50bab2SNaoya Horiguchi {
1333a36f1e90SSidhartha Kumar struct folio *folio;
13341a08ae36SPavel Tatashin bool pin = !!(current->flags & PF_MEMALLOC_PIN);
1335bf50bab2SNaoya Horiguchi
13369487ca60SMike Kravetz lockdep_assert_held(&hugetlb_lock);
1337a36f1e90SSidhartha Kumar list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
1338a36f1e90SSidhartha Kumar if (pin && !folio_is_longterm_pinnable(folio))
1339bbe88753SJoonsoo Kim continue;
1340bbe88753SJoonsoo Kim
1341a36f1e90SSidhartha Kumar if (folio_test_hwpoison(folio))
13426664bfc8SWei Yang continue;
1343bbe88753SJoonsoo Kim
1344a36f1e90SSidhartha Kumar list_move(&folio->lru, &h->hugepage_activelist);
1345a36f1e90SSidhartha Kumar folio_ref_unfreeze(folio, 1);
1346a36f1e90SSidhartha Kumar folio_clear_hugetlb_freed(folio);
1347bf50bab2SNaoya Horiguchi h->free_huge_pages--;
1348bf50bab2SNaoya Horiguchi h->free_huge_pages_node[nid]--;
1349a36f1e90SSidhartha Kumar return folio;
1350bf50bab2SNaoya Horiguchi }
1351bf50bab2SNaoya Horiguchi
13526664bfc8SWei Yang return NULL;
13536664bfc8SWei Yang }
13546664bfc8SWei Yang
1355a36f1e90SSidhartha Kumar static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
1356a36f1e90SSidhartha Kumar int nid, nodemask_t *nmask)
135794310cbcSAnshuman Khandual {
13583e59fcb0SMichal Hocko unsigned int cpuset_mems_cookie;
13593e59fcb0SMichal Hocko struct zonelist *zonelist;
13603e59fcb0SMichal Hocko struct zone *zone;
13613e59fcb0SMichal Hocko struct zoneref *z;
136298fa15f3SAnshuman Khandual int node = NUMA_NO_NODE;
13633e59fcb0SMichal Hocko
13643e59fcb0SMichal Hocko zonelist = node_zonelist(nid, gfp_mask);
13653e59fcb0SMichal Hocko
13663e59fcb0SMichal Hocko retry_cpuset:
13673e59fcb0SMichal Hocko cpuset_mems_cookie = read_mems_allowed_begin();
13683e59fcb0SMichal Hocko for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1369a36f1e90SSidhartha Kumar struct folio *folio;
137094310cbcSAnshuman Khandual
13713e59fcb0SMichal Hocko if (!cpuset_zone_allowed(zone, gfp_mask))
13723e59fcb0SMichal Hocko continue;
13733e59fcb0SMichal Hocko /*
13743e59fcb0SMichal Hocko * no need to ask again on the same node. Pool is node rather than
13753e59fcb0SMichal Hocko * zone aware
13763e59fcb0SMichal Hocko */
13773e59fcb0SMichal Hocko if (zone_to_nid(zone) == node)
13783e59fcb0SMichal Hocko continue;
13793e59fcb0SMichal Hocko node = zone_to_nid(zone);
138094310cbcSAnshuman Khandual
1381a36f1e90SSidhartha Kumar folio = dequeue_hugetlb_folio_node_exact(h, node);
1382a36f1e90SSidhartha Kumar if (folio)
1383a36f1e90SSidhartha Kumar return folio;
138494310cbcSAnshuman Khandual }
13853e59fcb0SMichal Hocko if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
13863e59fcb0SMichal Hocko goto retry_cpuset;
13873e59fcb0SMichal Hocko
138894310cbcSAnshuman Khandual return NULL;
138994310cbcSAnshuman Khandual }
139094310cbcSAnshuman Khandual
13918346d69dSXin Hao static unsigned long available_huge_pages(struct hstate *h)
13928346d69dSXin Hao {
13938346d69dSXin Hao return h->free_huge_pages - h->resv_huge_pages;
13948346d69dSXin Hao }
13958346d69dSXin Hao
1396ff7d853bSSidhartha Kumar static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
1397a5516438SAndi Kleen struct vm_area_struct *vma,
1398af0ed73eSJoonsoo Kim unsigned long address, int avoid_reserve,
1399af0ed73eSJoonsoo Kim long chg)
14001da177e4SLinus Torvalds {
1401a36f1e90SSidhartha Kumar struct folio *folio = NULL;
1402480eccf9SLee Schermerhorn struct mempolicy *mpol;
140304ec6264SVlastimil Babka gfp_t gfp_mask;
14043e59fcb0SMichal Hocko nodemask_t *nodemask;
140504ec6264SVlastimil Babka int nid;
14061da177e4SLinus Torvalds
1407a1e78772SMel Gorman /*
1408a1e78772SMel Gorman * A child process with MAP_PRIVATE mappings created by their parent
1409a1e78772SMel Gorman * have no page reserves. This check ensures that reservations are
1410a1e78772SMel Gorman * not "stolen". The child may still get SIGKILLed
1411a1e78772SMel Gorman */
14128346d69dSXin Hao if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
1413c0ff7453SMiao Xie goto err;
1414a1e78772SMel Gorman
141504f2cbe3SMel Gorman /* If reserves cannot be used, ensure enough pages are in the pool */
14168346d69dSXin Hao if (avoid_reserve && !available_huge_pages(h))
14176eab04a8SJustin P. Mattock goto err;
141804f2cbe3SMel Gorman
141904ec6264SVlastimil Babka gfp_mask = htlb_alloc_mask(h);
142004ec6264SVlastimil Babka nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1421cfcaa66fSBen Widawsky
1422cfcaa66fSBen Widawsky if (mpol_is_preferred_many(mpol)) {
1423a36f1e90SSidhartha Kumar folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1424a36f1e90SSidhartha Kumar nid, nodemask);
1425cfcaa66fSBen Widawsky
1426cfcaa66fSBen Widawsky /* Fallback to all nodes if folio == NULL */
1427cfcaa66fSBen Widawsky nodemask = NULL;
1428cfcaa66fSBen Widawsky }
1429cfcaa66fSBen Widawsky
1430a36f1e90SSidhartha Kumar if (!folio)
1431a36f1e90SSidhartha Kumar folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1432a36f1e90SSidhartha Kumar nid, nodemask);
1433cfcaa66fSBen Widawsky
1434a36f1e90SSidhartha Kumar if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
1435a36f1e90SSidhartha Kumar folio_set_hugetlb_restore_reserve(folio);
1436a63884e9SJoonsoo Kim h->resv_huge_pages--;
1437bf50bab2SNaoya Horiguchi }
1438cc9a6c87SMel Gorman
1439cc9a6c87SMel Gorman mpol_cond_put(mpol);
1440ff7d853bSSidhartha Kumar return folio;
1441cc9a6c87SMel Gorman
1442c0ff7453SMiao Xie err:
1443cc9a6c87SMel Gorman return NULL;
14441da177e4SLinus Torvalds }
14451da177e4SLinus Torvalds
14461cac6f2cSLuiz Capitulino /*
14471cac6f2cSLuiz Capitulino * common helper functions for hstate_next_node_to_{alloc|free}.
14481cac6f2cSLuiz Capitulino * We may have allocated or freed a huge page based on a different
14491cac6f2cSLuiz Capitulino * nodes_allowed previously, so h->next_node_to_{alloc|free} might
14501cac6f2cSLuiz Capitulino * be outside of *nodes_allowed. Ensure that we use an allowed
14511cac6f2cSLuiz Capitulino * node for alloc or free.
14521cac6f2cSLuiz Capitulino */
14531cac6f2cSLuiz Capitulino static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
14541cac6f2cSLuiz Capitulino {
14550edaf86cSAndrew Morton nid = next_node_in(nid, *nodes_allowed);
14561cac6f2cSLuiz Capitulino VM_BUG_ON(nid >= MAX_NUMNODES);
14571cac6f2cSLuiz Capitulino
14581cac6f2cSLuiz Capitulino return nid;
14591cac6f2cSLuiz Capitulino }
14601cac6f2cSLuiz Capitulino
14611cac6f2cSLuiz Capitulino static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
14621cac6f2cSLuiz Capitulino {
14631cac6f2cSLuiz Capitulino if (!node_isset(nid, *nodes_allowed))
14641cac6f2cSLuiz Capitulino nid = next_node_allowed(nid, nodes_allowed);
14651cac6f2cSLuiz Capitulino return nid;
14661cac6f2cSLuiz Capitulino }
14671cac6f2cSLuiz Capitulino
14681cac6f2cSLuiz Capitulino /*
14691cac6f2cSLuiz Capitulino * returns the previously saved node ["this node"] from which to
14701cac6f2cSLuiz Capitulino * allocate a persistent huge page for the pool and advance the
14711cac6f2cSLuiz Capitulino * next node from which to allocate, handling wrap at end of node
14721cac6f2cSLuiz Capitulino * mask.
14731cac6f2cSLuiz Capitulino */
14741cac6f2cSLuiz Capitulino static int hstate_next_node_to_alloc(struct hstate *h,
14751cac6f2cSLuiz Capitulino nodemask_t *nodes_allowed)
14761cac6f2cSLuiz Capitulino {
14771cac6f2cSLuiz Capitulino int nid;
14781cac6f2cSLuiz Capitulino
14791cac6f2cSLuiz Capitulino VM_BUG_ON(!nodes_allowed);
14801cac6f2cSLuiz Capitulino
14811cac6f2cSLuiz Capitulino nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
14821cac6f2cSLuiz Capitulino h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
14831cac6f2cSLuiz Capitulino
14841cac6f2cSLuiz Capitulino return nid;
14851cac6f2cSLuiz Capitulino }
14861cac6f2cSLuiz Capitulino
14871cac6f2cSLuiz Capitulino /*
148810c6ec49SMike Kravetz * helper for remove_pool_huge_page() - return the previously saved
14891cac6f2cSLuiz Capitulino * node ["this node"] from which to free a huge page. Advance the
14901cac6f2cSLuiz Capitulino * next node id whether or not we find a free huge page to free so
14911cac6f2cSLuiz Capitulino * that the next attempt to free addresses the next node.
14921cac6f2cSLuiz Capitulino */
14931cac6f2cSLuiz Capitulino static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
14941cac6f2cSLuiz Capitulino {
14951cac6f2cSLuiz Capitulino int nid;
14961cac6f2cSLuiz Capitulino
14971cac6f2cSLuiz Capitulino VM_BUG_ON(!nodes_allowed);
14981cac6f2cSLuiz Capitulino
14991cac6f2cSLuiz Capitulino nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
15001cac6f2cSLuiz Capitulino h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
15011cac6f2cSLuiz Capitulino
15021cac6f2cSLuiz Capitulino return nid;
15031cac6f2cSLuiz Capitulino }
15041cac6f2cSLuiz Capitulino
15051cac6f2cSLuiz Capitulino #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
15061cac6f2cSLuiz Capitulino for (nr_nodes = nodes_weight(*mask); \
15071cac6f2cSLuiz Capitulino nr_nodes > 0 && \
15081cac6f2cSLuiz Capitulino ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
15091cac6f2cSLuiz Capitulino nr_nodes--)
15101cac6f2cSLuiz Capitulino
15111cac6f2cSLuiz Capitulino #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
15121cac6f2cSLuiz Capitulino for (nr_nodes = nodes_weight(*mask); \
15131cac6f2cSLuiz Capitulino nr_nodes > 0 && \
15141cac6f2cSLuiz Capitulino ((node = hstate_next_node_to_free(hs, mask)) || 1); \
15151cac6f2cSLuiz Capitulino nr_nodes--)
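/*
 * Typical use of the allocation variant (sketch only; the allocation
 * helper below is hypothetical):
 *
 *	int nr_nodes, node;
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		if (alloc_one_pool_huge_page_on(h, node))
 *			break;
 *	}
 *
 * Each walk starts at h->next_nid_to_alloc, visits at most
 * nodes_weight(*nodes_allowed) nodes, and leaves the round-robin cursor
 * advanced for the next caller.
 */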
15161cac6f2cSLuiz Capitulino
15178531fc6fSMike Kravetz /* used to demote non-gigantic huge pages as well */
1518911565b8SSidhartha Kumar static void __destroy_compound_gigantic_folio(struct folio *folio,
151934d9e35bSMike Kravetz unsigned int order, bool demote)
1520944d9fecSLuiz Capitulino {
1521944d9fecSLuiz Capitulino int i;
1522944d9fecSLuiz Capitulino int nr_pages = 1 << order;
152314455eabSCheng Li struct page *p;
1524944d9fecSLuiz Capitulino
152546f27228SMatthew Wilcox (Oracle) atomic_set(&folio->_entire_mapcount, 0);
1526eec20426SMatthew Wilcox (Oracle) atomic_set(&folio->_nr_pages_mapped, 0);
152794688e8eSMatthew Wilcox (Oracle) atomic_set(&folio->_pincount, 0);
152847e29d32SJohn Hubbard
152914455eabSCheng Li for (i = 1; i < nr_pages; i++) {
1530911565b8SSidhartha Kumar p = folio_page(folio, i);
15316c141973SMike Kravetz p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
1532a01f4390SMike Kravetz p->mapping = NULL;
15331d798ca3SKirill A. Shutemov clear_compound_head(p);
153434d9e35bSMike Kravetz if (!demote)
1535944d9fecSLuiz Capitulino set_page_refcounted(p);
1536944d9fecSLuiz Capitulino }
1537944d9fecSLuiz Capitulino
1538911565b8SSidhartha Kumar __folio_clear_head(folio);
1539944d9fecSLuiz Capitulino }
1540944d9fecSLuiz Capitulino
1541911565b8SSidhartha Kumar static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
15428531fc6fSMike Kravetz unsigned int order)
15438531fc6fSMike Kravetz {
1544911565b8SSidhartha Kumar __destroy_compound_gigantic_folio(folio, order, true);
15458531fc6fSMike Kravetz }
15468531fc6fSMike Kravetz
15478531fc6fSMike Kravetz #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1548911565b8SSidhartha Kumar static void destroy_compound_gigantic_folio(struct folio *folio,
154934d9e35bSMike Kravetz unsigned int order)
155034d9e35bSMike Kravetz {
1551911565b8SSidhartha Kumar __destroy_compound_gigantic_folio(folio, order, false);
155234d9e35bSMike Kravetz }
155334d9e35bSMike Kravetz
15547f325a8dSSidhartha Kumar static void free_gigantic_folio(struct folio *folio, unsigned int order)
1555944d9fecSLuiz Capitulino {
1556cf11e85fSRoman Gushchin /*
1557cf11e85fSRoman Gushchin * If the page isn't allocated using the cma allocator,
1558cf11e85fSRoman Gushchin * cma_release() returns false.
1559cf11e85fSRoman Gushchin */
1560dbda8feaSBarry Song #ifdef CONFIG_CMA
15617f325a8dSSidhartha Kumar int nid = folio_nid(folio);
15627f325a8dSSidhartha Kumar
15637f325a8dSSidhartha Kumar if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
1564cf11e85fSRoman Gushchin return;
1565dbda8feaSBarry Song #endif
1566cf11e85fSRoman Gushchin
15677f325a8dSSidhartha Kumar free_contig_range(folio_pfn(folio), 1 << order);
1568944d9fecSLuiz Capitulino }
1569944d9fecSLuiz Capitulino
15704eb0716eSAlexandre Ghiti #ifdef CONFIG_CONTIG_ALLOC
157219fc1a7eSSidhartha Kumar static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
1572d9cc948fSMichal Hocko int nid, nodemask_t *nodemask)
1573944d9fecSLuiz Capitulino {
157419fc1a7eSSidhartha Kumar struct page *page;
157504adbc3fSMiaohe Lin unsigned long nr_pages = pages_per_huge_page(h);
1576953f064aSLi Xinhai if (nid == NUMA_NO_NODE)
1577953f064aSLi Xinhai nid = numa_mem_id();
1578944d9fecSLuiz Capitulino
1579dbda8feaSBarry Song #ifdef CONFIG_CMA
1580dbda8feaSBarry Song {
1581cf11e85fSRoman Gushchin int node;
1582cf11e85fSRoman Gushchin
1583953f064aSLi Xinhai if (hugetlb_cma[nid]) {
1584953f064aSLi Xinhai page = cma_alloc(hugetlb_cma[nid], nr_pages,
1585953f064aSLi Xinhai huge_page_order(h), true);
1586953f064aSLi Xinhai if (page)
158719fc1a7eSSidhartha Kumar return page_folio(page);
1588953f064aSLi Xinhai }
1589953f064aSLi Xinhai
1590953f064aSLi Xinhai if (!(gfp_mask & __GFP_THISNODE)) {
1591cf11e85fSRoman Gushchin for_each_node_mask(node, *nodemask) {
1592953f064aSLi Xinhai if (node == nid || !hugetlb_cma[node])
1593cf11e85fSRoman Gushchin continue;
1594cf11e85fSRoman Gushchin
1595cf11e85fSRoman Gushchin page = cma_alloc(hugetlb_cma[node], nr_pages,
1596cf11e85fSRoman Gushchin huge_page_order(h), true);
1597cf11e85fSRoman Gushchin if (page)
159819fc1a7eSSidhartha Kumar return page_folio(page);
1599cf11e85fSRoman Gushchin }
1600cf11e85fSRoman Gushchin }
1601953f064aSLi Xinhai }
1602dbda8feaSBarry Song #endif
1603cf11e85fSRoman Gushchin
160419fc1a7eSSidhartha Kumar page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
160519fc1a7eSSidhartha Kumar return page ? page_folio(page) : NULL;
1606944d9fecSLuiz Capitulino }
1607944d9fecSLuiz Capitulino
16084eb0716eSAlexandre Ghiti #else /* !CONFIG_CONTIG_ALLOC */
161019fc1a7eSSidhartha Kumar static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
16104eb0716eSAlexandre Ghiti int nid, nodemask_t *nodemask)
16114eb0716eSAlexandre Ghiti {
16124eb0716eSAlexandre Ghiti return NULL;
16134eb0716eSAlexandre Ghiti }
16144eb0716eSAlexandre Ghiti #endif /* CONFIG_CONTIG_ALLOC */
1615944d9fecSLuiz Capitulino
1616e1073d1eSAneesh Kumar K.V #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
161819fc1a7eSSidhartha Kumar static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
16184eb0716eSAlexandre Ghiti int nid, nodemask_t *nodemask)
16194eb0716eSAlexandre Ghiti {
16204eb0716eSAlexandre Ghiti return NULL;
16214eb0716eSAlexandre Ghiti }
16227f325a8dSSidhartha Kumar static inline void free_gigantic_folio(struct folio *folio,
16237f325a8dSSidhartha Kumar unsigned int order) { }
1624911565b8SSidhartha Kumar static inline void destroy_compound_gigantic_folio(struct folio *folio,
1625d00181b9SKirill A. Shutemov unsigned int order) { }
1626944d9fecSLuiz Capitulino #endif
1627944d9fecSLuiz Capitulino
162832c87719SMike Kravetz static inline void __clear_hugetlb_destructor(struct hstate *h,
162932c87719SMike Kravetz struct folio *folio)
163032c87719SMike Kravetz {
163132c87719SMike Kravetz lockdep_assert_held(&hugetlb_lock);
163232c87719SMike Kravetz
16332431b5f2SMatthew Wilcox (Oracle) __folio_clear_hugetlb(folio);
163432c87719SMike Kravetz }
163532c87719SMike Kravetz
163632c87719SMike Kravetz /*
163732c87719SMike Kravetz * Remove hugetlb folio from lists.
163832c87719SMike Kravetz * If vmemmap exists for the folio, update dtor so that the folio appears
163932c87719SMike Kravetz * as just a compound page. Otherwise, wait until after allocating vmemmap
164032c87719SMike Kravetz * to update dtor.
164134d9e35bSMike Kravetz *
1642cfd5082bSSidhartha Kumar * A reference is held on the folio, except in the case of demote.
16436eb4e88aSMike Kravetz *
16446eb4e88aSMike Kravetz * Must be called with hugetlb lock held.
16456eb4e88aSMike Kravetz */
1646cfd5082bSSidhartha Kumar static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
164734d9e35bSMike Kravetz bool adjust_surplus,
164834d9e35bSMike Kravetz bool demote)
16496eb4e88aSMike Kravetz {
1650cfd5082bSSidhartha Kumar int nid = folio_nid(folio);
16516eb4e88aSMike Kravetz
1652f074732dSSidhartha Kumar VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
1653f074732dSSidhartha Kumar VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);
16546eb4e88aSMike Kravetz
16559487ca60SMike Kravetz lockdep_assert_held(&hugetlb_lock);
16566eb4e88aSMike Kravetz if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
16576eb4e88aSMike Kravetz return;
16586eb4e88aSMike Kravetz
1659cfd5082bSSidhartha Kumar list_del(&folio->lru);
16606eb4e88aSMike Kravetz
1661cfd5082bSSidhartha Kumar if (folio_test_hugetlb_freed(folio)) {
16626eb4e88aSMike Kravetz h->free_huge_pages--;
16636eb4e88aSMike Kravetz h->free_huge_pages_node[nid]--;
16646eb4e88aSMike Kravetz }
16656eb4e88aSMike Kravetz if (adjust_surplus) {
16666eb4e88aSMike Kravetz h->surplus_huge_pages--;
16676eb4e88aSMike Kravetz h->surplus_huge_pages_node[nid]--;
16686eb4e88aSMike Kravetz }
16696eb4e88aSMike Kravetz
1670e32d20c0SMike Kravetz /*
167132c87719SMike Kravetz * We can only clear the hugetlb destructor after allocating vmemmap
167232c87719SMike Kravetz * pages. Otherwise, someone (memory error handling) may try to write
167332c87719SMike Kravetz * to tail struct pages.
167432c87719SMike Kravetz */
167532c87719SMike Kravetz if (!folio_test_hugetlb_vmemmap_optimized(folio))
167632c87719SMike Kravetz __clear_hugetlb_destructor(h, folio);
167732c87719SMike Kravetz
167832c87719SMike Kravetz /*
167934d9e35bSMike Kravetz * In the case of demote we do not ref count the page as it will soon
168034d9e35bSMike Kravetz * be turned into a page of smaller size.
1681e32d20c0SMike Kravetz */
168234d9e35bSMike Kravetz if (!demote)
1683cfd5082bSSidhartha Kumar folio_ref_unfreeze(folio, 1);
16846eb4e88aSMike Kravetz
16856eb4e88aSMike Kravetz h->nr_huge_pages--;
16866eb4e88aSMike Kravetz h->nr_huge_pages_node[nid]--;
16876eb4e88aSMike Kravetz }
16886eb4e88aSMike Kravetz
1689cfd5082bSSidhartha Kumar static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
169034d9e35bSMike Kravetz bool adjust_surplus)
169134d9e35bSMike Kravetz {
1692cfd5082bSSidhartha Kumar __remove_hugetlb_folio(h, folio, adjust_surplus, false);
169334d9e35bSMike Kravetz }
169434d9e35bSMike Kravetz
1695cfd5082bSSidhartha Kumar static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
16968531fc6fSMike Kravetz bool adjust_surplus)
16978531fc6fSMike Kravetz {
1698cfd5082bSSidhartha Kumar __remove_hugetlb_folio(h, folio, adjust_surplus, true);
16998531fc6fSMike Kravetz }
17008531fc6fSMike Kravetz
17012f6c57d6SSidhartha Kumar static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
1702ad2fa371SMuchun Song bool adjust_surplus)
1703ad2fa371SMuchun Song {
1704ad2fa371SMuchun Song int zeroed;
17052f6c57d6SSidhartha Kumar int nid = folio_nid(folio);
1706ad2fa371SMuchun Song
17072f6c57d6SSidhartha Kumar VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);
1708ad2fa371SMuchun Song
1709ad2fa371SMuchun Song lockdep_assert_held(&hugetlb_lock);
1710ad2fa371SMuchun Song
17112f6c57d6SSidhartha Kumar INIT_LIST_HEAD(&folio->lru);
1712ad2fa371SMuchun Song h->nr_huge_pages++;
1713ad2fa371SMuchun Song h->nr_huge_pages_node[nid]++;
1714ad2fa371SMuchun Song
1715ad2fa371SMuchun Song if (adjust_surplus) {
1716ad2fa371SMuchun Song h->surplus_huge_pages++;
1717ad2fa371SMuchun Song h->surplus_huge_pages_node[nid]++;
1718ad2fa371SMuchun Song }
1719ad2fa371SMuchun Song
17202431b5f2SMatthew Wilcox (Oracle) __folio_set_hugetlb(folio);
17212f6c57d6SSidhartha Kumar folio_change_private(folio, NULL);
1722a9e1eab2SMiaohe Lin /*
17232f6c57d6SSidhartha Kumar * We have to set hugetlb_vmemmap_optimized again because the
17242f6c57d6SSidhartha Kumar * folio_change_private(folio, NULL) call above cleared it.
1725a9e1eab2SMiaohe Lin */
17262f6c57d6SSidhartha Kumar folio_set_hugetlb_vmemmap_optimized(folio);
1727ad2fa371SMuchun Song
1728ad2fa371SMuchun Song /*
17292f6c57d6SSidhartha Kumar * This folio is about to be managed by the hugetlb allocator and
1730b65a4edaSMike Kravetz * should have no users. Drop our reference, and check for others
1731b65a4edaSMike Kravetz * just in case.
1732ad2fa371SMuchun Song */
17332f6c57d6SSidhartha Kumar zeroed = folio_put_testzero(folio);
17342f6c57d6SSidhartha Kumar if (unlikely(!zeroed))
1735b65a4edaSMike Kravetz /*
1736454a00c4SMatthew Wilcox (Oracle) * It is VERY unlikely someone else has taken a ref
1737454a00c4SMatthew Wilcox (Oracle) * on the folio. In this case, we simply return as
1738454a00c4SMatthew Wilcox (Oracle) * free_huge_folio() will be called when this other ref
1739454a00c4SMatthew Wilcox (Oracle) * is dropped.
1740b65a4edaSMike Kravetz */
1741b65a4edaSMike Kravetz return;
1742b65a4edaSMike Kravetz
17432f6c57d6SSidhartha Kumar arch_clear_hugepage_flags(&folio->page);
1744240d67a8SSidhartha Kumar enqueue_hugetlb_folio(h, folio);
1745ad2fa371SMuchun Song }
1746ad2fa371SMuchun Song
17476f6956cfSSidhartha Kumar static void __update_and_free_hugetlb_folio(struct hstate *h,
17486f6956cfSSidhartha Kumar struct folio *folio)
17496af2acb6SAdam Litke {
175032c87719SMike Kravetz bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);
1751a5516438SAndi Kleen
17524eb0716eSAlexandre Ghiti if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1753944d9fecSLuiz Capitulino return;
175418229df5SAndy Whitcroft
1755161df60eSNaoya Horiguchi /*
1756161df60eSNaoya Horiguchi * If we don't know which subpages are hwpoisoned, we can't free
1757161df60eSNaoya Horiguchi * the hugepage, so it's leaked intentionally.
1758161df60eSNaoya Horiguchi */
17597f325a8dSSidhartha Kumar if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1760161df60eSNaoya Horiguchi return;
1761161df60eSNaoya Horiguchi
17626f6956cfSSidhartha Kumar if (hugetlb_vmemmap_restore(h, &folio->page)) {
1763ad2fa371SMuchun Song spin_lock_irq(&hugetlb_lock);
1764ad2fa371SMuchun Song /*
1765ad2fa371SMuchun Song * If we cannot allocate vmemmap pages, just refuse to free the
1766ad2fa371SMuchun Song * page and put the page back on the hugetlb free list and treat
1767ad2fa371SMuchun Song * as a surplus page.
1768ad2fa371SMuchun Song */
17697f325a8dSSidhartha Kumar add_hugetlb_folio(h, folio, true);
1770ad2fa371SMuchun Song spin_unlock_irq(&hugetlb_lock);
1771ad2fa371SMuchun Song return;
1772ad2fa371SMuchun Song }
1773ad2fa371SMuchun Song
1774161df60eSNaoya Horiguchi /*
1775161df60eSNaoya Horiguchi * Move PageHWPoison flag from head page to the raw error pages,
1776161df60eSNaoya Horiguchi * which makes any healthy subpages reusable.
1777161df60eSNaoya Horiguchi */
1778911565b8SSidhartha Kumar if (unlikely(folio_test_hwpoison(folio)))
17792ff6ceceSSidhartha Kumar folio_clear_hugetlb_hwpoison(folio);
1780161df60eSNaoya Horiguchi
178132c87719SMike Kravetz /*
178232c87719SMike Kravetz * If vmemmap pages were allocated above, then we need to clear the
178332c87719SMike Kravetz * hugetlb destructor under the hugetlb lock.
178432c87719SMike Kravetz */
178532c87719SMike Kravetz if (clear_dtor) {
178632c87719SMike Kravetz spin_lock_irq(&hugetlb_lock);
178732c87719SMike Kravetz __clear_hugetlb_destructor(h, folio);
178832c87719SMike Kravetz spin_unlock_irq(&hugetlb_lock);
178932c87719SMike Kravetz }
179032c87719SMike Kravetz
1791a01f4390SMike Kravetz /*
1792a01f4390SMike Kravetz * Non-gigantic pages demoted from CMA allocated gigantic pages
17937f325a8dSSidhartha Kumar * need to be given back to CMA in free_gigantic_folio.
1794a01f4390SMike Kravetz */
1795a01f4390SMike Kravetz if (hstate_is_gigantic(h) ||
17962f6c57d6SSidhartha Kumar hugetlb_cma_folio(folio, huge_page_order(h))) {
1797911565b8SSidhartha Kumar destroy_compound_gigantic_folio(folio, huge_page_order(h));
17987f325a8dSSidhartha Kumar free_gigantic_folio(folio, huge_page_order(h));
1799944d9fecSLuiz Capitulino } else {
18006f6956cfSSidhartha Kumar __free_pages(&folio->page, huge_page_order(h));
18016af2acb6SAdam Litke }
1802944d9fecSLuiz Capitulino }
18036af2acb6SAdam Litke
1804b65d4adbSMuchun Song /*
1805d6ef19e2SSidhartha Kumar * Since update_and_free_hugetlb_folio() can be called from any context, we cannot
1806b65d4adbSMuchun Song * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
1807b65d4adbSMuchun Song * actual freeing in a workqueue to avoid using GFP_ATOMIC to allocate
1808b65d4adbSMuchun Song * the vmemmap pages.
1809b65d4adbSMuchun Song *
1810b65d4adbSMuchun Song * free_hpage_workfn() locklessly retrieves the linked list of pages to be
1811b65d4adbSMuchun Song * freed and frees them one-by-one. As the page->mapping pointer is going
1812b65d4adbSMuchun Song * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1813b65d4adbSMuchun Song * structure of a lockless linked list of huge pages to be freed.
1814b65d4adbSMuchun Song */
1815b65d4adbSMuchun Song static LLIST_HEAD(hpage_freelist);
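/*
 * Flow of the deferred path (summary of the functions below):
 * free_huge_folio() calls update_and_free_hugetlb_folio(h, folio, true),
 * which pushes &folio->mapping onto hpage_freelist with llist_add() and
 * schedules free_hpage_work; the worker, free_hpage_workfn(), drains the
 * list with llist_del_all() and frees each entry via
 * __update_and_free_hugetlb_folio() in a context that may sleep.
 */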
1816b65d4adbSMuchun Song
1817b65d4adbSMuchun Song static void free_hpage_workfn(struct work_struct *work)
1818b65d4adbSMuchun Song {
1819b65d4adbSMuchun Song struct llist_node *node;
1820b65d4adbSMuchun Song
1821b65d4adbSMuchun Song node = llist_del_all(&hpage_freelist);
1822b65d4adbSMuchun Song
1823b65d4adbSMuchun Song while (node) {
1824b65d4adbSMuchun Song struct page *page;
1825b65d4adbSMuchun Song struct hstate *h;
1826b65d4adbSMuchun Song
1827b65d4adbSMuchun Song page = container_of((struct address_space **)node,
1828b65d4adbSMuchun Song struct page, mapping);
1829b65d4adbSMuchun Song node = node->next;
1830b65d4adbSMuchun Song page->mapping = NULL;
1831b65d4adbSMuchun Song /*
1832affd26b1SSidhartha Kumar * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
1833affd26b1SSidhartha Kumar * folio_hstate() is going to trigger because a previous call to
18349c5ccf2dSMatthew Wilcox (Oracle) * remove_hugetlb_folio() will clear the hugetlb bit, so do
18359c5ccf2dSMatthew Wilcox (Oracle) * not use folio_hstate() directly.
1836b65d4adbSMuchun Song */
1837b65d4adbSMuchun Song h = size_to_hstate(page_size(page));
1838b65d4adbSMuchun Song
18396f6956cfSSidhartha Kumar __update_and_free_hugetlb_folio(h, page_folio(page));
1840b65d4adbSMuchun Song
1841b65d4adbSMuchun Song cond_resched();
1842b65d4adbSMuchun Song }
1843b65d4adbSMuchun Song }
1844b65d4adbSMuchun Song static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1845b65d4adbSMuchun Song
1846b65d4adbSMuchun Song static inline void flush_free_hpage_work(struct hstate *h)
1847b65d4adbSMuchun Song {
18486213834cSMuchun Song if (hugetlb_vmemmap_optimizable(h))
1849b65d4adbSMuchun Song flush_work(&free_hpage_work);
1850b65d4adbSMuchun Song }
1851b65d4adbSMuchun Song
1852d6ef19e2SSidhartha Kumar static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
1853b65d4adbSMuchun Song bool atomic)
1854b65d4adbSMuchun Song {
1855d6ef19e2SSidhartha Kumar if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
18566f6956cfSSidhartha Kumar __update_and_free_hugetlb_folio(h, folio);
1857b65d4adbSMuchun Song return;
1858b65d4adbSMuchun Song }
1859b65d4adbSMuchun Song
1860b65d4adbSMuchun Song /*
1861b65d4adbSMuchun Song * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1862b65d4adbSMuchun Song *
1863b65d4adbSMuchun Song * Only call schedule_work() if hpage_freelist is previously
1864b65d4adbSMuchun Song * empty. Otherwise, schedule_work() had been called but the workfn
1865b65d4adbSMuchun Song * hasn't retrieved the list yet.
1866b65d4adbSMuchun Song */
1867d6ef19e2SSidhartha Kumar if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
1868b65d4adbSMuchun Song schedule_work(&free_hpage_work);
1869b65d4adbSMuchun Song }
1870b65d4adbSMuchun Song
187110c6ec49SMike Kravetz static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
187210c6ec49SMike Kravetz {
187310c6ec49SMike Kravetz struct page *page, *t_page;
1874d6ef19e2SSidhartha Kumar struct folio *folio;
187510c6ec49SMike Kravetz
187610c6ec49SMike Kravetz list_for_each_entry_safe(page, t_page, list, lru) {
1877d6ef19e2SSidhartha Kumar folio = page_folio(page);
1878d6ef19e2SSidhartha Kumar update_and_free_hugetlb_folio(h, folio, false);
187910c6ec49SMike Kravetz cond_resched();
188010c6ec49SMike Kravetz }
188110c6ec49SMike Kravetz }
188210c6ec49SMike Kravetz
1883e5ff2159SAndi Kleen struct hstate *size_to_hstate(unsigned long size)
1884e5ff2159SAndi Kleen {
1885e5ff2159SAndi Kleen struct hstate *h;
1886e5ff2159SAndi Kleen
1887e5ff2159SAndi Kleen for_each_hstate(h) {
1888e5ff2159SAndi Kleen if (huge_page_size(h) == size)
1889e5ff2159SAndi Kleen return h;
1890e5ff2159SAndi Kleen }
1891e5ff2159SAndi Kleen return NULL;
1892e5ff2159SAndi Kleen }
1893e5ff2159SAndi Kleen
1894454a00c4SMatthew Wilcox (Oracle) void free_huge_folio(struct folio *folio)
189527a85ef1SDavid Gibson {
1896a5516438SAndi Kleen /*
1897a5516438SAndi Kleen * Can't pass hstate in here because it is called from the
1898a5516438SAndi Kleen * compound page destructor.
1899a5516438SAndi Kleen */
19000356c4b9SSidhartha Kumar struct hstate *h = folio_hstate(folio);
19010356c4b9SSidhartha Kumar int nid = folio_nid(folio);
19020356c4b9SSidhartha Kumar struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
190307443a85SJoonsoo Kim bool restore_reserve;
1904db71ef79SMike Kravetz unsigned long flags;
190527a85ef1SDavid Gibson
19060356c4b9SSidhartha Kumar VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
19070356c4b9SSidhartha Kumar VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
19088ace22bcSYongkai Wu
19090356c4b9SSidhartha Kumar hugetlb_set_folio_subpool(folio, NULL);
19100356c4b9SSidhartha Kumar if (folio_test_anon(folio))
19110356c4b9SSidhartha Kumar __ClearPageAnonExclusive(&folio->page);
19120356c4b9SSidhartha Kumar folio->mapping = NULL;
19130356c4b9SSidhartha Kumar restore_reserve = folio_test_hugetlb_restore_reserve(folio);
19140356c4b9SSidhartha Kumar folio_clear_hugetlb_restore_reserve(folio);
191527a85ef1SDavid Gibson
19161c5ecae3SMike Kravetz /*
1917d6995da3SMike Kravetz * If HPageRestoreReserve was set on page, page allocation consumed a
19180919e1b6SMike Kravetz * reservation. If the page was associated with a subpool, there
19190919e1b6SMike Kravetz * would have been a page reserved in the subpool before allocation
19200919e1b6SMike Kravetz * via hugepage_subpool_get_pages(). Since we are 'restoring' the
19216c26d310SMiaohe Lin * reservation, do not call hugepage_subpool_put_pages() as this will
19220919e1b6SMike Kravetz * remove the reserved page from the subpool.
19230919e1b6SMike Kravetz */
19240919e1b6SMike Kravetz if (!restore_reserve) {
19250919e1b6SMike Kravetz /*
19260919e1b6SMike Kravetz * A return code of zero implies that the subpool will be
19270919e1b6SMike Kravetz * under its minimum size if the reservation is not restored
19280919e1b6SMike Kravetz * after the page is freed. Therefore, force the restore_reserve
19290919e1b6SMike Kravetz * operation.
19301c5ecae3SMike Kravetz */
19311c5ecae3SMike Kravetz if (hugepage_subpool_put_pages(spool, 1) == 0)
19321c5ecae3SMike Kravetz restore_reserve = true;
19330919e1b6SMike Kravetz }
19341c5ecae3SMike Kravetz
1935db71ef79SMike Kravetz spin_lock_irqsave(&hugetlb_lock, flags);
19360356c4b9SSidhartha Kumar folio_clear_hugetlb_migratable(folio);
1937d4ab0316SSidhartha Kumar hugetlb_cgroup_uncharge_folio(hstate_index(h),
1938d4ab0316SSidhartha Kumar pages_per_huge_page(h), folio);
1939d4ab0316SSidhartha Kumar hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
1940d4ab0316SSidhartha Kumar pages_per_huge_page(h), folio);
194107443a85SJoonsoo Kim if (restore_reserve)
194207443a85SJoonsoo Kim h->resv_huge_pages++;
194307443a85SJoonsoo Kim
19440356c4b9SSidhartha Kumar if (folio_test_hugetlb_temporary(folio)) {
1945cfd5082bSSidhartha Kumar remove_hugetlb_folio(h, folio, false);
1946db71ef79SMike Kravetz spin_unlock_irqrestore(&hugetlb_lock, flags);
1947d6ef19e2SSidhartha Kumar update_and_free_hugetlb_folio(h, folio, true);
1948ab5ac90aSMichal Hocko } else if (h->surplus_huge_pages_node[nid]) {
19490edaecfaSAneesh Kumar K.V /* remove the page from active list */
1950cfd5082bSSidhartha Kumar remove_hugetlb_folio(h, folio, true);
1951db71ef79SMike Kravetz spin_unlock_irqrestore(&hugetlb_lock, flags);
1952d6ef19e2SSidhartha Kumar update_and_free_hugetlb_folio(h, folio, true);
19537893d1d5SAdam Litke } else {
1954454a00c4SMatthew Wilcox (Oracle) arch_clear_hugepage_flags(&folio->page);
1955240d67a8SSidhartha Kumar enqueue_hugetlb_folio(h, folio);
1956db71ef79SMike Kravetz spin_unlock_irqrestore(&hugetlb_lock, flags);
195727a85ef1SDavid Gibson }
19581121828aSMike Kravetz }
195927a85ef1SDavid Gibson
1960d3d99fccSOscar Salvador /*
1961d3d99fccSOscar Salvador * Must be called with the hugetlb lock held
1962d3d99fccSOscar Salvador */
1963d3d99fccSOscar Salvador static void __prep_account_new_huge_page(struct hstate *h, int nid)
1964d3d99fccSOscar Salvador {
1965d3d99fccSOscar Salvador lockdep_assert_held(&hugetlb_lock);
1966d3d99fccSOscar Salvador h->nr_huge_pages++;
1967d3d99fccSOscar Salvador h->nr_huge_pages_node[nid]++;
1968d3d99fccSOscar Salvador }
1969d3d99fccSOscar Salvador
1970de656ed3SSidhartha Kumar static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
1971b7ba30c6SAndi Kleen {
1972de656ed3SSidhartha Kumar hugetlb_vmemmap_optimize(h, &folio->page);
1973de656ed3SSidhartha Kumar INIT_LIST_HEAD(&folio->lru);
19742431b5f2SMatthew Wilcox (Oracle) __folio_set_hugetlb(folio);
1975de656ed3SSidhartha Kumar hugetlb_set_folio_subpool(folio, NULL);
1976de656ed3SSidhartha Kumar set_hugetlb_cgroup(folio, NULL);
1977de656ed3SSidhartha Kumar set_hugetlb_cgroup_rsvd(folio, NULL);
1978d3d99fccSOscar Salvador }
1979d3d99fccSOscar Salvador
1980d1c60955SSidhartha Kumar static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
1981d3d99fccSOscar Salvador {
1982de656ed3SSidhartha Kumar __prep_new_hugetlb_folio(h, folio);
1983db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
1984d3d99fccSOscar Salvador __prep_account_new_huge_page(h, nid);
1985db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
1986b7ba30c6SAndi Kleen }
1987b7ba30c6SAndi Kleen
1988d1c60955SSidhartha Kumar static bool __prep_compound_gigantic_folio(struct folio *folio,
1989d1c60955SSidhartha Kumar unsigned int order, bool demote)
199020a0307cSWu Fengguang {
19917118fc29SMike Kravetz int i, j;
199220a0307cSWu Fengguang int nr_pages = 1 << order;
199314455eabSCheng Li struct page *p;
199420a0307cSWu Fengguang
1995d1c60955SSidhartha Kumar __folio_clear_reserved(folio);
19962b21624fSMike Kravetz for (i = 0; i < nr_pages; i++) {
1997d1c60955SSidhartha Kumar p = folio_page(folio, i);
199814455eabSCheng Li
1999ef5a22beSAndrea Arcangeli /*
2000ef5a22beSAndrea Arcangeli * For gigantic hugepages allocated through bootmem at
2001ef5a22beSAndrea Arcangeli * boot, it's safer to be consistent with the not-gigantic
2002ef5a22beSAndrea Arcangeli * hugepages and clear the PG_reserved bit from all tail pages
20037c8de358SEthon Paul * too. Otherwise drivers using get_user_pages() to access tail
2004ef5a22beSAndrea Arcangeli * pages may get the reference counting wrong if they see
2005ef5a22beSAndrea Arcangeli * PG_reserved set on a tail page (despite the head page not
2006ef5a22beSAndrea Arcangeli * having PG_reserved set). Enforcing this consistency between
2007ef5a22beSAndrea Arcangeli * head and tail pages allows drivers to optimize away a check
2008ef5a22beSAndrea Arcangeli * on the head page when they need know if put_page() is needed
2009ef5a22beSAndrea Arcangeli * on the head page when they need to know if put_page() is needed
2010ef5a22beSAndrea Arcangeli */
20117fb0728aSMike Kravetz if (i != 0) /* head page cleared above */
2012ef5a22beSAndrea Arcangeli __ClearPageReserved(p);
20137118fc29SMike Kravetz /*
20147118fc29SMike Kravetz * Subtle and very unlikely
20157118fc29SMike Kravetz *
20167118fc29SMike Kravetz * Gigantic 'page allocators' such as memblock or cma will
20177118fc29SMike Kravetz * return a set of pages with each page ref counted. We need
20187118fc29SMike Kravetz * to turn this set of pages into a compound page with tail
20197118fc29SMike Kravetz * page ref counts set to zero. Code such as speculative page
20207118fc29SMike Kravetz * cache adding could take a ref on a 'to be' tail page.
20217118fc29SMike Kravetz * We need to respect any increased ref count, and only set
20227118fc29SMike Kravetz * the ref count to zero if count is currently 1. If count
2023416d85edSMike Kravetz * is not 1, we return an error. An error return indicates
2024416d85edSMike Kravetz * the set of pages cannot be converted to a gigantic page.
2025416d85edSMike Kravetz * The caller who allocated the pages should then discard the
2026416d85edSMike Kravetz * pages using the appropriate free interface.
202734d9e35bSMike Kravetz *
202834d9e35bSMike Kravetz * In the case of demote, the ref count will be zero.
20297118fc29SMike Kravetz */
203034d9e35bSMike Kravetz if (!demote) {
20317118fc29SMike Kravetz if (!page_ref_freeze(p, 1)) {
2032416d85edSMike Kravetz pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
20337118fc29SMike Kravetz goto out_error;
20347118fc29SMike Kravetz }
203534d9e35bSMike Kravetz } else {
203634d9e35bSMike Kravetz VM_BUG_ON_PAGE(page_count(p), p);
203734d9e35bSMike Kravetz }
20382b21624fSMike Kravetz if (i != 0)
2039d1c60955SSidhartha Kumar set_compound_head(p, &folio->page);
204020a0307cSWu Fengguang }
2041e3b7bf97STarun Sahu __folio_set_head(folio);
2042e3b7bf97STarun Sahu /* we rely on prep_new_hugetlb_folio to set the destructor */
2043e3b7bf97STarun Sahu folio_set_order(folio, order);
204446f27228SMatthew Wilcox (Oracle) atomic_set(&folio->_entire_mapcount, -1);
2045eec20426SMatthew Wilcox (Oracle) atomic_set(&folio->_nr_pages_mapped, 0);
204694688e8eSMatthew Wilcox (Oracle) atomic_set(&folio->_pincount, 0);
20477118fc29SMike Kravetz return true;
20487118fc29SMike Kravetz
20497118fc29SMike Kravetz out_error:
20502b21624fSMike Kravetz /* undo page modifications made above */
20512b21624fSMike Kravetz for (j = 0; j < i; j++) {
2052d1c60955SSidhartha Kumar p = folio_page(folio, j);
20532b21624fSMike Kravetz if (j != 0)
20547118fc29SMike Kravetz clear_compound_head(p);
20557118fc29SMike Kravetz set_page_refcounted(p);
20567118fc29SMike Kravetz }
20577118fc29SMike Kravetz /* need to clear PG_reserved on remaining tail pages */
205814455eabSCheng Li for (; j < nr_pages; j++) {
2059d1c60955SSidhartha Kumar p = folio_page(folio, j);
20607118fc29SMike Kravetz __ClearPageReserved(p);
206114455eabSCheng Li }
20627118fc29SMike Kravetz return false;
206320a0307cSWu Fengguang }
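/*
 * Editor's note (illustrative sketch, not part of the original source): the
 * refcount contract enforced by the loop above.  A gigantic 'page allocator'
 * such as memblock or cma hands back nr_pages pages, each with refcount 1.
 * A page may only become a tail of the compound folio if that single
 * reference can be frozen away:
 *
 *	if (page_ref_freeze(p, 1))		// count was exactly 1, now 0
 *		set_compound_head(p, &folio->page);
 *	else
 *		goto out_error;			// speculative ref seen; the
 *						// caller must discard the
 *						// whole run of pages
 *
 * The demote path skips the freeze because the pages being demoted already
 * have a zero refcount on their future tail pages.
 */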
206420a0307cSWu Fengguang
2065d1c60955SSidhartha Kumar static bool prep_compound_gigantic_folio(struct folio *folio,
20668531fc6fSMike Kravetz unsigned int order)
20678531fc6fSMike Kravetz {
2068d1c60955SSidhartha Kumar return __prep_compound_gigantic_folio(folio, order, false);
2069d1c60955SSidhartha Kumar }
2070d1c60955SSidhartha Kumar
2071d1c60955SSidhartha Kumar static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
2072d1c60955SSidhartha Kumar unsigned int order)
2073d1c60955SSidhartha Kumar {
2074d1c60955SSidhartha Kumar return __prep_compound_gigantic_folio(folio, order, true);
20758531fc6fSMike Kravetz }
20768531fc6fSMike Kravetz
20777795912cSAndrew Morton /*
2078c0d0381aSMike Kravetz * Find and lock address space (mapping) in write mode.
2079c0d0381aSMike Kravetz *
2080336bf30eSMike Kravetz * Upon entry, the page is locked which means that page_mapping() is
2081336bf30eSMike Kravetz * stable. Due to locking order, we can only trylock_write. If we
2082336bf30eSMike Kravetz * cannot get the lock, simply return NULL to the caller.
2083c0d0381aSMike Kravetz */
2084c0d0381aSMike Kravetz struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
2085c0d0381aSMike Kravetz {
2086336bf30eSMike Kravetz struct address_space *mapping = page_mapping(hpage);
2087c0d0381aSMike Kravetz
2088c0d0381aSMike Kravetz if (!mapping)
2089c0d0381aSMike Kravetz return mapping;
2090c0d0381aSMike Kravetz
2091c0d0381aSMike Kravetz if (i_mmap_trylock_write(mapping))
2092c0d0381aSMike Kravetz return mapping;
2093c0d0381aSMike Kravetz
2094c0d0381aSMike Kravetz return NULL;
2095c0d0381aSMike Kravetz }
2096c0d0381aSMike Kravetz
2097fe19bd3dSHugh Dickins pgoff_t hugetlb_basepage_index(struct page *page)
209813d60f4bSZhang Yi {
209913d60f4bSZhang Yi struct page *page_head = compound_head(page);
210013d60f4bSZhang Yi pgoff_t index = page_index(page_head);
210113d60f4bSZhang Yi unsigned long compound_idx;
210213d60f4bSZhang Yi
210323baf831SKirill A. Shutemov if (compound_order(page_head) > MAX_ORDER)
210413d60f4bSZhang Yi compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
210513d60f4bSZhang Yi else
210613d60f4bSZhang Yi compound_idx = page - page_head;
210713d60f4bSZhang Yi
210813d60f4bSZhang Yi return (index << compound_order(page_head)) + compound_idx;
210913d60f4bSZhang Yi }
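/*
 * Worked example (illustrative): with 4K base pages, a 2MB hugepage has
 * compound_order(page_head) == 9.  If the head page sits at hugepage index 3
 * in the file and 'page' is the 6th base page of that hugepage
 * (compound_idx == 5), the returned base-page index is (3 << 9) + 5 == 1541.
 * For gigantic pages (compound_order > MAX_ORDER) the offset is derived from
 * pfns instead, because the tail struct pages are not guaranteed to be
 * virtually contiguous with the head.
 */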
211013d60f4bSZhang Yi
211119fc1a7eSSidhartha Kumar static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
2112f60858f9SMike Kravetz gfp_t gfp_mask, int nid, nodemask_t *nmask,
2113f60858f9SMike Kravetz nodemask_t *node_alloc_noretry)
21141da177e4SLinus Torvalds {
2115af0fb9dfSMichal Hocko int order = huge_page_order(h);
21161da177e4SLinus Torvalds struct page *page;
2117f60858f9SMike Kravetz bool alloc_try_hard = true;
21182b21624fSMike Kravetz bool retry = true;
2119f96efd58SJoe Jin
2120f60858f9SMike Kravetz /*
2121f60858f9SMike Kravetz * By default we always try hard to allocate the page with
2122f60858f9SMike Kravetz * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in
2123f60858f9SMike Kravetz * a loop (to adjust global huge page counts) and previous allocation
2124f60858f9SMike Kravetz * failed, do not continue to try hard on the same node. Use the
2125f60858f9SMike Kravetz * node_alloc_noretry bitmap to manage this state information.
2126f60858f9SMike Kravetz */
2127f60858f9SMike Kravetz if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
2128f60858f9SMike Kravetz alloc_try_hard = false;
2129f60858f9SMike Kravetz gfp_mask |= __GFP_COMP|__GFP_NOWARN;
2130f60858f9SMike Kravetz if (alloc_try_hard)
2131f60858f9SMike Kravetz gfp_mask |= __GFP_RETRY_MAYFAIL;
2132af0fb9dfSMichal Hocko if (nid == NUMA_NO_NODE)
2133af0fb9dfSMichal Hocko nid = numa_mem_id();
21342b21624fSMike Kravetz retry:
213584172f4bSMatthew Wilcox (Oracle) page = __alloc_pages(gfp_mask, order, nid, nmask);
21362b21624fSMike Kravetz
21372b21624fSMike Kravetz /* Freeze head page */
21382b21624fSMike Kravetz if (page && !page_ref_freeze(page, 1)) {
21392b21624fSMike Kravetz __free_pages(page, order);
21402b21624fSMike Kravetz if (retry) { /* retry once */
21412b21624fSMike Kravetz retry = false;
21422b21624fSMike Kravetz goto retry;
21432b21624fSMike Kravetz }
21442b21624fSMike Kravetz /* WOW! twice in a row. */
21452b21624fSMike Kravetz pr_warn("HugeTLB head page unexpected inflated ref count\n");
21462b21624fSMike Kravetz page = NULL;
21472b21624fSMike Kravetz }
21482b21624fSMike Kravetz
2149f60858f9SMike Kravetz /*
2150f60858f9SMike Kravetz * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page, this
2151f60858f9SMike Kravetz * indicates an overall state change. Clear bit so that we resume
2152f60858f9SMike Kravetz * normal 'try hard' allocations.
2153f60858f9SMike Kravetz */
2154f60858f9SMike Kravetz if (node_alloc_noretry && page && !alloc_try_hard)
2155f60858f9SMike Kravetz node_clear(nid, *node_alloc_noretry);
2156f60858f9SMike Kravetz
2157f60858f9SMike Kravetz /*
2158f60858f9SMike Kravetz * If we tried hard to get a page but failed, set bit so that
2159f60858f9SMike Kravetz * subsequent attempts will not try as hard until there is an
2160f60858f9SMike Kravetz * overall state change.
2161f60858f9SMike Kravetz */
2162f60858f9SMike Kravetz if (node_alloc_noretry && !page && alloc_try_hard)
2163f60858f9SMike Kravetz node_set(nid, *node_alloc_noretry);
2164f60858f9SMike Kravetz
216519fc1a7eSSidhartha Kumar if (!page) {
216619fc1a7eSSidhartha Kumar __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
216719fc1a7eSSidhartha Kumar return NULL;
216819fc1a7eSSidhartha Kumar }
216919fc1a7eSSidhartha Kumar
217019fc1a7eSSidhartha Kumar __count_vm_event(HTLB_BUDDY_PGALLOC);
217119fc1a7eSSidhartha Kumar return page_folio(page);
217263b4613cSNishanth Aravamudan }
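/*
 * Example (illustrative, relying on the usual htlb_alloc_mask() behaviour):
 * when called from alloc_pool_huge_page() below for a movable hstate, the
 * mask passed in is typically htlb_alloc_mask(h) | __GFP_THISNODE, so the
 * allocation above ends up using roughly
 *
 *	GFP_HIGHUSER_MOVABLE | __GFP_THISNODE | __GFP_COMP | __GFP_NOWARN
 *	[ | __GFP_RETRY_MAYFAIL while the node is not marked in
 *	  node_alloc_noretry ]
 *
 * A try-hard failure sets the node's bit in node_alloc_noretry, downgrading
 * later attempts on that node to cheap, fail-fast allocations until one of
 * them succeeds again.
 */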
217363b4613cSNishanth Aravamudan
2174af0fb9dfSMichal Hocko /*
21750c397daeSMichal Hocko * Common helper to allocate a fresh hugetlb page. All specific allocators
21760c397daeSMichal Hocko * should use this function to get new hugetlb pages
21772b21624fSMike Kravetz *
21782b21624fSMike Kravetz * Note that returned page is 'frozen': ref count of head page and all tail
21792b21624fSMike Kravetz * pages is zero.
21800c397daeSMichal Hocko */
218119fc1a7eSSidhartha Kumar static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
2182f60858f9SMike Kravetz gfp_t gfp_mask, int nid, nodemask_t *nmask,
2183f60858f9SMike Kravetz nodemask_t *node_alloc_noretry)
21840c397daeSMichal Hocko {
21857f325a8dSSidhartha Kumar struct folio *folio;
21867118fc29SMike Kravetz bool retry = false;
21870c397daeSMichal Hocko
21887118fc29SMike Kravetz retry:
21890c397daeSMichal Hocko if (hstate_is_gigantic(h))
219019fc1a7eSSidhartha Kumar folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
21910c397daeSMichal Hocko else
219219fc1a7eSSidhartha Kumar folio = alloc_buddy_hugetlb_folio(h, gfp_mask,
2193f60858f9SMike Kravetz nid, nmask, node_alloc_noretry);
219419fc1a7eSSidhartha Kumar if (!folio)
21950c397daeSMichal Hocko return NULL;
21967118fc29SMike Kravetz if (hstate_is_gigantic(h)) {
2197d1c60955SSidhartha Kumar if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
21987118fc29SMike Kravetz /*
21997118fc29SMike Kravetz * Rare failure to convert pages to compound page.
22007118fc29SMike Kravetz * Free pages and try again - ONCE!
22017118fc29SMike Kravetz */
22027f325a8dSSidhartha Kumar free_gigantic_folio(folio, huge_page_order(h));
22037118fc29SMike Kravetz if (!retry) {
22047118fc29SMike Kravetz retry = true;
22057118fc29SMike Kravetz goto retry;
22067118fc29SMike Kravetz }
22077118fc29SMike Kravetz return NULL;
22087118fc29SMike Kravetz }
22097118fc29SMike Kravetz }
2210d1c60955SSidhartha Kumar prep_new_hugetlb_folio(h, folio, folio_nid(folio));
22110c397daeSMichal Hocko
221219fc1a7eSSidhartha Kumar return folio;
22130c397daeSMichal Hocko }
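/*
 * Usage sketch (illustrative): freshly allocated hugetlb folios come back
 * frozen (refcount 0).  A caller that wants the folio in the free pool right
 * away can therefore do
 *
 *	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, NULL, NULL);
 *	if (folio)
 *		free_huge_folio(folio);	// enqueues it on the hstate freelist
 *
 * which is what alloc_pool_huge_page() below does, while callers handing the
 * folio out (e.g. alloc_migrate_hugetlb_folio()) first unfreeze the refcount
 * back to 1 with folio_ref_unfreeze().
 */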
22140c397daeSMichal Hocko
22150c397daeSMichal Hocko /*
2216af0fb9dfSMichal Hocko * Allocates a fresh page to the hugetlb allocator pool in a node-interleaved
2217af0fb9dfSMichal Hocko * manner.
2218af0fb9dfSMichal Hocko */
2219f60858f9SMike Kravetz static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
2220f60858f9SMike Kravetz nodemask_t *node_alloc_noretry)
2221b2261026SJoonsoo Kim {
222219fc1a7eSSidhartha Kumar struct folio *folio;
2223b2261026SJoonsoo Kim int nr_nodes, node;
2224af0fb9dfSMichal Hocko gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2225b2261026SJoonsoo Kim
2226b2261026SJoonsoo Kim for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
222719fc1a7eSSidhartha Kumar folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node,
222819fc1a7eSSidhartha Kumar nodes_allowed, node_alloc_noretry);
222919fc1a7eSSidhartha Kumar if (folio) {
2230454a00c4SMatthew Wilcox (Oracle) free_huge_folio(folio); /* free it into the hugepage allocator */
223119fc1a7eSSidhartha Kumar return 1;
223219fc1a7eSSidhartha Kumar }
2233b2261026SJoonsoo Kim }
2234b2261026SJoonsoo Kim
2235af0fb9dfSMichal Hocko return 0;
2236b2261026SJoonsoo Kim }
2237b2261026SJoonsoo Kim
2238e8c5c824SLee Schermerhorn /*
223910c6ec49SMike Kravetz * Remove a huge page from the pool, from the next node to free. Attempt to keep
224010c6ec49SMike Kravetz * persistent huge pages more or less balanced over allowed nodes.
224110c6ec49SMike Kravetz * This routine only 'removes' the hugetlb page. The caller must make
224210c6ec49SMike Kravetz * an additional call to free the page to low level allocators.
2243e8c5c824SLee Schermerhorn * Called with hugetlb_lock locked.
2244e8c5c824SLee Schermerhorn */
224510c6ec49SMike Kravetz static struct page *remove_pool_huge_page(struct hstate *h,
224610c6ec49SMike Kravetz nodemask_t *nodes_allowed,
22476ae11b27SLee Schermerhorn bool acct_surplus)
2248e8c5c824SLee Schermerhorn {
2249b2261026SJoonsoo Kim int nr_nodes, node;
225010c6ec49SMike Kravetz struct page *page = NULL;
2251cfd5082bSSidhartha Kumar struct folio *folio;
2252e8c5c824SLee Schermerhorn
22539487ca60SMike Kravetz lockdep_assert_held(&hugetlb_lock);
2254b2261026SJoonsoo Kim for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2255685f3457SLee Schermerhorn /*
2256685f3457SLee Schermerhorn * If we're returning unused surplus pages, only examine
2257685f3457SLee Schermerhorn * nodes with surplus pages.
2258685f3457SLee Schermerhorn */
2259b2261026SJoonsoo Kim if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2260b2261026SJoonsoo Kim !list_empty(&h->hugepage_freelists[node])) {
226110c6ec49SMike Kravetz page = list_entry(h->hugepage_freelists[node].next,
2262e8c5c824SLee Schermerhorn struct page, lru);
2263cfd5082bSSidhartha Kumar folio = page_folio(page);
2264cfd5082bSSidhartha Kumar remove_hugetlb_folio(h, folio, acct_surplus);
22659a76db09SLee Schermerhorn break;
2266e8c5c824SLee Schermerhorn }
2267b2261026SJoonsoo Kim }
2268e8c5c824SLee Schermerhorn
226910c6ec49SMike Kravetz return page;
2270e8c5c824SLee Schermerhorn }
2271e8c5c824SLee Schermerhorn
2272c8721bbbSNaoya Horiguchi /*
2273c8721bbbSNaoya Horiguchi * Dissolve a given free hugepage into free buddy pages. This function does
2274faf53defSNaoya Horiguchi * nothing for in-use hugepages and non-hugepages.
2275faf53defSNaoya Horiguchi * This function returns values like below:
2276faf53defSNaoya Horiguchi *
2277ad2fa371SMuchun Song * -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
2278ad2fa371SMuchun Song * when the system is under memory pressure and the feature of
2279ad2fa371SMuchun Song * freeing unused vmemmap pages associated with each hugetlb page
2280ad2fa371SMuchun Song * is enabled.
2281faf53defSNaoya Horiguchi * -EBUSY: failed to dissolve the free hugepage or the hugepage is in-use
2282faf53defSNaoya Horiguchi * (allocated or reserved.)
2283faf53defSNaoya Horiguchi * 0: successfully dissolved the free hugepage or the page is not a
2284faf53defSNaoya Horiguchi * hugepage (considered as already dissolved)
2285c8721bbbSNaoya Horiguchi */
2286c3114a84SAnshuman Khandual int dissolve_free_huge_page(struct page *page)
2287c8721bbbSNaoya Horiguchi {
22886bc9b564SNaoya Horiguchi int rc = -EBUSY;
22891a7cdab5SSidhartha Kumar struct folio *folio = page_folio(page);
2290082d5b6bSGerald Schaefer
22917ffddd49SMuchun Song retry:
2292faf53defSNaoya Horiguchi /* Not to disrupt normal path by vainly holding hugetlb_lock */
22931a7cdab5SSidhartha Kumar if (!folio_test_hugetlb(folio))
2294faf53defSNaoya Horiguchi return 0;
2295faf53defSNaoya Horiguchi
2296db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
22971a7cdab5SSidhartha Kumar if (!folio_test_hugetlb(folio)) {
2298faf53defSNaoya Horiguchi rc = 0;
2299faf53defSNaoya Horiguchi goto out;
2300faf53defSNaoya Horiguchi }
2301faf53defSNaoya Horiguchi
23021a7cdab5SSidhartha Kumar if (!folio_ref_count(folio)) {
23031a7cdab5SSidhartha Kumar struct hstate *h = folio_hstate(folio);
23048346d69dSXin Hao if (!available_huge_pages(h))
2305082d5b6bSGerald Schaefer goto out;
23067ffddd49SMuchun Song
23077ffddd49SMuchun Song /*
23087ffddd49SMuchun Song * We should make sure that the page is already on the free list
23097ffddd49SMuchun Song * when it is dissolved.
23107ffddd49SMuchun Song */
23111a7cdab5SSidhartha Kumar if (unlikely(!folio_test_hugetlb_freed(folio))) {
2312db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
23137ffddd49SMuchun Song cond_resched();
23147ffddd49SMuchun Song
23157ffddd49SMuchun Song /*
23167ffddd49SMuchun Song * Theoretically, we should return -EBUSY when we
23177ffddd49SMuchun Song * encounter this race. In fact, we have a chance
23187ffddd49SMuchun Song * to successfully dissolve the page if we do a
23197ffddd49SMuchun Song * retry, because the race window is quite small.
23207ffddd49SMuchun Song * Seizing this opportunity is an optimization
23217ffddd49SMuchun Song * that increases the success rate of dissolving the page.
23227ffddd49SMuchun Song */
23237ffddd49SMuchun Song goto retry;
23247ffddd49SMuchun Song }
23257ffddd49SMuchun Song
2326cfd5082bSSidhartha Kumar remove_hugetlb_folio(h, folio, false);
2327ad2fa371SMuchun Song h->max_huge_pages--;
2328ad2fa371SMuchun Song spin_unlock_irq(&hugetlb_lock);
2329ad2fa371SMuchun Song
2330c3114a84SAnshuman Khandual /*
2331d6ef19e2SSidhartha Kumar * Normally update_and_free_hugetlb_folio will allocate the required vmemmap
2332d6ef19e2SSidhartha Kumar * before freeing the page. update_and_free_hugetlb_folio will fail to
2333ad2fa371SMuchun Song * free the page if it cannot allocate the required vmemmap. We
2334ad2fa371SMuchun Song * need to adjust max_huge_pages if the page is not freed.
2335ad2fa371SMuchun Song * Attempt to allocate vmemmap here so that we can take
2336ad2fa371SMuchun Song * appropriate action on failure.
2337ad2fa371SMuchun Song */
23381a7cdab5SSidhartha Kumar rc = hugetlb_vmemmap_restore(h, &folio->page);
2339ad2fa371SMuchun Song if (!rc) {
2340d6ef19e2SSidhartha Kumar update_and_free_hugetlb_folio(h, folio, false);
2341ad2fa371SMuchun Song } else {
2342ad2fa371SMuchun Song spin_lock_irq(&hugetlb_lock);
23432f6c57d6SSidhartha Kumar add_hugetlb_folio(h, folio, false);
2344ad2fa371SMuchun Song h->max_huge_pages++;
2345ad2fa371SMuchun Song spin_unlock_irq(&hugetlb_lock);
2346ad2fa371SMuchun Song }
2347ad2fa371SMuchun Song
2348ad2fa371SMuchun Song return rc;
2349c8721bbbSNaoya Horiguchi }
2350082d5b6bSGerald Schaefer out:
2351db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
2352082d5b6bSGerald Schaefer return rc;
2353c8721bbbSNaoya Horiguchi }
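/*
 * Caller sketch (illustrative), matching the return values documented above:
 *
 *	rc = dissolve_free_huge_page(pfn_to_page(pfn));
 *	switch (rc) {
 *	case 0:		// dissolved, or not a hugepage: keep going
 *	case -EBUSY:	// in use or reserved: the range cannot be freed now
 *	case -ENOMEM:	// vmemmap restore failed: page remains a hugepage
 *	}
 *
 * dissolve_free_huge_pages() below runs exactly this loop over a pfn range on
 * behalf of memory hotplug.
 */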
2354c8721bbbSNaoya Horiguchi
2355c8721bbbSNaoya Horiguchi /*
2356c8721bbbSNaoya Horiguchi * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2357c8721bbbSNaoya Horiguchi * make specified memory blocks removable from the system.
23582247bb33SGerald Schaefer * Note that this will dissolve a free gigantic hugepage completely, if any
23592247bb33SGerald Schaefer * part of it lies within the given range.
2360082d5b6bSGerald Schaefer * Also note that if dissolve_free_huge_page() returns with an error, all
2361082d5b6bSGerald Schaefer * free hugepages that were dissolved before that error are lost.
2362c8721bbbSNaoya Horiguchi */
2363082d5b6bSGerald Schaefer int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
2364c8721bbbSNaoya Horiguchi {
2365c8721bbbSNaoya Horiguchi unsigned long pfn;
2366eb03aa00SGerald Schaefer struct page *page;
2367082d5b6bSGerald Schaefer int rc = 0;
2368dc2628f3SMuchun Song unsigned int order;
2369dc2628f3SMuchun Song struct hstate *h;
2370c8721bbbSNaoya Horiguchi
2371d0177639SLi Zhong if (!hugepages_supported())
2372082d5b6bSGerald Schaefer return rc;
2373d0177639SLi Zhong
2374dc2628f3SMuchun Song order = huge_page_order(&default_hstate);
2375dc2628f3SMuchun Song for_each_hstate(h)
2376dc2628f3SMuchun Song order = min(order, huge_page_order(h));
2377dc2628f3SMuchun Song
2378dc2628f3SMuchun Song for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2379eb03aa00SGerald Schaefer page = pfn_to_page(pfn);
2380eb03aa00SGerald Schaefer rc = dissolve_free_huge_page(page);
2381eb03aa00SGerald Schaefer if (rc)
2382082d5b6bSGerald Schaefer break;
2383eb03aa00SGerald Schaefer }
2384082d5b6bSGerald Schaefer
2385082d5b6bSGerald Schaefer return rc;
2386c8721bbbSNaoya Horiguchi }
2387c8721bbbSNaoya Horiguchi
2388ab5ac90aSMichal Hocko /*
2389ab5ac90aSMichal Hocko * Allocates a fresh surplus page from the page allocator.
2390ab5ac90aSMichal Hocko */
23913a740e8bSSidhartha Kumar static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
23923a740e8bSSidhartha Kumar gfp_t gfp_mask, int nid, nodemask_t *nmask)
23937893d1d5SAdam Litke {
239419fc1a7eSSidhartha Kumar struct folio *folio = NULL;
23957893d1d5SAdam Litke
2396bae7f4aeSLuiz Capitulino if (hstate_is_gigantic(h))
2397aa888a74SAndi Kleen return NULL;
2398aa888a74SAndi Kleen
2399db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
24009980d744SMichal Hocko if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
24019980d744SMichal Hocko goto out_unlock;
2402db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
2403d1c3fb1fSNishanth Aravamudan
240419fc1a7eSSidhartha Kumar folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
240519fc1a7eSSidhartha Kumar if (!folio)
24060c397daeSMichal Hocko return NULL;
2407d1c3fb1fSNishanth Aravamudan
2408db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
24099980d744SMichal Hocko /*
24109980d744SMichal Hocko * We could have raced with the pool size change.
24119980d744SMichal Hocko * Double check that and simply deallocate the new page
24129980d744SMichal Hocko * if we would end up overcommitting the surpluses. Abuse the
2413454a00c4SMatthew Wilcox (Oracle) * temporary page flag to work around the nasty free_huge_folio
24149980d744SMichal Hocko * codeflow.
24159980d744SMichal Hocko */
24169980d744SMichal Hocko if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
241719fc1a7eSSidhartha Kumar folio_set_hugetlb_temporary(folio);
2418db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
2419454a00c4SMatthew Wilcox (Oracle) free_huge_folio(folio);
24202bf753e6SKai Shen return NULL;
2421b65a4edaSMike Kravetz }
2422b65a4edaSMike Kravetz
24239980d744SMichal Hocko h->surplus_huge_pages++;
242419fc1a7eSSidhartha Kumar h->surplus_huge_pages_node[folio_nid(folio)]++;
24259980d744SMichal Hocko
24269980d744SMichal Hocko out_unlock:
2427db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
24287893d1d5SAdam Litke
24293a740e8bSSidhartha Kumar return folio;
24307893d1d5SAdam Litke }
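/*
 * Example (illustrative): with nr_overcommit_huge_pages == 2 and one surplus
 * page already allocated, a second call can still succeed, while a third call
 * bails out in the first check above and returns NULL.  The limit is
 * re-checked after the allocation because hugetlb_lock is dropped while
 * calling into the page allocator, so the pool and overcommit limits may have
 * changed in the meantime; in that case the fresh folio is marked temporary
 * and freed.
 */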
24317893d1d5SAdam Litke
2432e37d3e83SSidhartha Kumar static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
2433ab5ac90aSMichal Hocko int nid, nodemask_t *nmask)
2434ab5ac90aSMichal Hocko {
243519fc1a7eSSidhartha Kumar struct folio *folio;
2436ab5ac90aSMichal Hocko
2437ab5ac90aSMichal Hocko if (hstate_is_gigantic(h))
2438ab5ac90aSMichal Hocko return NULL;
2439ab5ac90aSMichal Hocko
244019fc1a7eSSidhartha Kumar folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
244119fc1a7eSSidhartha Kumar if (!folio)
2442ab5ac90aSMichal Hocko return NULL;
2443ab5ac90aSMichal Hocko
24442b21624fSMike Kravetz /* fresh huge pages are frozen */
244519fc1a7eSSidhartha Kumar folio_ref_unfreeze(folio, 1);
2446ab5ac90aSMichal Hocko /*
2447ab5ac90aSMichal Hocko * We do not account these pages as surplus because they are only
2448ab5ac90aSMichal Hocko * temporary and will be released properly on the last reference
2449ab5ac90aSMichal Hocko */
245019fc1a7eSSidhartha Kumar folio_set_hugetlb_temporary(folio);
2451ab5ac90aSMichal Hocko
2452e37d3e83SSidhartha Kumar return folio;
2453ab5ac90aSMichal Hocko }
2454ab5ac90aSMichal Hocko
2455e4e574b7SAdam Litke /*
2456099730d6SDave Hansen * Use the VMA's mpolicy to allocate a huge page from the buddy.
2457099730d6SDave Hansen */
2458e0ec90eeSDave Hansen static
2459ff7d853bSSidhartha Kumar struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
2460099730d6SDave Hansen struct vm_area_struct *vma, unsigned long addr)
2461099730d6SDave Hansen {
24623a740e8bSSidhartha Kumar struct folio *folio = NULL;
2463aaf14e40SMichal Hocko struct mempolicy *mpol;
2464aaf14e40SMichal Hocko gfp_t gfp_mask = htlb_alloc_mask(h);
2465aaf14e40SMichal Hocko int nid;
2466aaf14e40SMichal Hocko nodemask_t *nodemask;
2467aaf14e40SMichal Hocko
2468aaf14e40SMichal Hocko nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2469cfcaa66fSBen Widawsky if (mpol_is_preferred_many(mpol)) {
2470cfcaa66fSBen Widawsky gfp_t gfp = gfp_mask | __GFP_NOWARN;
2471cfcaa66fSBen Widawsky
2472cfcaa66fSBen Widawsky gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
24733a740e8bSSidhartha Kumar folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
2474cfcaa66fSBen Widawsky
2475cfcaa66fSBen Widawsky /* Fallback to all nodes if page==NULL */
2476cfcaa66fSBen Widawsky nodemask = NULL;
2477cfcaa66fSBen Widawsky }
2478cfcaa66fSBen Widawsky
24793a740e8bSSidhartha Kumar if (!folio)
24803a740e8bSSidhartha Kumar folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
2481aaf14e40SMichal Hocko mpol_cond_put(mpol);
2482ff7d853bSSidhartha Kumar return folio;
2483099730d6SDave Hansen }
2484099730d6SDave Hansen
2485e37d3e83SSidhartha Kumar /* folio migration callback function */
2486e37d3e83SSidhartha Kumar struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
2487d92bbc27SJoonsoo Kim nodemask_t *nmask, gfp_t gfp_mask)
24884db9b2efSMichal Hocko {
2489db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
24908346d69dSXin Hao if (available_huge_pages(h)) {
2491a36f1e90SSidhartha Kumar struct folio *folio;
24923e59fcb0SMichal Hocko
2493a36f1e90SSidhartha Kumar folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
2494a36f1e90SSidhartha Kumar preferred_nid, nmask);
2495a36f1e90SSidhartha Kumar if (folio) {
2496db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
2497e37d3e83SSidhartha Kumar return folio;
24984db9b2efSMichal Hocko }
24994db9b2efSMichal Hocko }
2500db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
25014db9b2efSMichal Hocko
2502e37d3e83SSidhartha Kumar return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
25034db9b2efSMichal Hocko }
25044db9b2efSMichal Hocko
2505ebd63723SMichal Hocko /* mempolicy aware migration callback */
2506d0ce0e47SSidhartha Kumar struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
2507389c8178SMichal Hocko unsigned long address)
2508ebd63723SMichal Hocko {
2509ebd63723SMichal Hocko struct mempolicy *mpol;
2510ebd63723SMichal Hocko nodemask_t *nodemask;
2511e37d3e83SSidhartha Kumar struct folio *folio;
2512ebd63723SMichal Hocko gfp_t gfp_mask;
2513ebd63723SMichal Hocko int node;
2514ebd63723SMichal Hocko
2515ebd63723SMichal Hocko gfp_mask = htlb_alloc_mask(h);
2516ebd63723SMichal Hocko node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
2517e37d3e83SSidhartha Kumar folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
2518ebd63723SMichal Hocko mpol_cond_put(mpol);
2519ebd63723SMichal Hocko
2520d0ce0e47SSidhartha Kumar return folio;
2521ebd63723SMichal Hocko }
2522ebd63723SMichal Hocko
2523bf50bab2SNaoya Horiguchi /*
252425985edcSLucas De Marchi * Increase the hugetlb pool such that it can accommodate a reservation
2525e4e574b7SAdam Litke * of size 'delta'.
2526e4e574b7SAdam Litke */
25270a4f3d1bSLiu Xiang static int gather_surplus_pages(struct hstate *h, long delta)
25281b2a1e7bSJules Irenge __must_hold(&hugetlb_lock)
2529e4e574b7SAdam Litke {
253034665341SMiaohe Lin LIST_HEAD(surplus_list);
2531454a00c4SMatthew Wilcox (Oracle) struct folio *folio, *tmp;
25320a4f3d1bSLiu Xiang int ret;
25330a4f3d1bSLiu Xiang long i;
25340a4f3d1bSLiu Xiang long needed, allocated;
253528073b02SHillf Danton bool alloc_ok = true;
2536e4e574b7SAdam Litke
25379487ca60SMike Kravetz lockdep_assert_held(&hugetlb_lock);
2538a5516438SAndi Kleen needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2539ac09b3a1SAdam Litke if (needed <= 0) {
2540a5516438SAndi Kleen h->resv_huge_pages += delta;
2541e4e574b7SAdam Litke return 0;
2542ac09b3a1SAdam Litke }
2543e4e574b7SAdam Litke
2544e4e574b7SAdam Litke allocated = 0;
2545e4e574b7SAdam Litke
2546e4e574b7SAdam Litke ret = -ENOMEM;
2547e4e574b7SAdam Litke retry:
2548db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
2549e4e574b7SAdam Litke for (i = 0; i < needed; i++) {
25503a740e8bSSidhartha Kumar folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
25512b21624fSMike Kravetz NUMA_NO_NODE, NULL);
25523a740e8bSSidhartha Kumar if (!folio) {
255328073b02SHillf Danton alloc_ok = false;
255428073b02SHillf Danton break;
255528073b02SHillf Danton }
25563a740e8bSSidhartha Kumar list_add(&folio->lru, &surplus_list);
255769ed779aSDavid Rientjes cond_resched();
2558e4e574b7SAdam Litke }
255928073b02SHillf Danton allocated += i;
2560e4e574b7SAdam Litke
2561e4e574b7SAdam Litke /*
2562e4e574b7SAdam Litke * After retaking hugetlb_lock, we need to recalculate 'needed'
2563e4e574b7SAdam Litke * because either resv_huge_pages or free_huge_pages may have changed.
2564e4e574b7SAdam Litke */
2565db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
2566a5516438SAndi Kleen needed = (h->resv_huge_pages + delta) -
2567a5516438SAndi Kleen (h->free_huge_pages + allocated);
256828073b02SHillf Danton if (needed > 0) {
256928073b02SHillf Danton if (alloc_ok)
2570e4e574b7SAdam Litke goto retry;
257128073b02SHillf Danton /*
257228073b02SHillf Danton * We were not able to allocate enough pages to
257328073b02SHillf Danton * satisfy the entire reservation so we free what
257428073b02SHillf Danton * we've allocated so far.
257528073b02SHillf Danton */
257628073b02SHillf Danton goto free;
257728073b02SHillf Danton }
2578e4e574b7SAdam Litke /*
2579e4e574b7SAdam Litke * The surplus_list now contains _at_least_ the number of extra pages
258025985edcSLucas De Marchi * needed to accommodate the reservation. Add the appropriate number
2581e4e574b7SAdam Litke * of pages to the hugetlb pool and free the extras back to the buddy
2582ac09b3a1SAdam Litke * allocator. Commit the entire reservation here to prevent another
2583ac09b3a1SAdam Litke * process from stealing the pages as they are added to the pool but
2584ac09b3a1SAdam Litke * before they are reserved.
2585e4e574b7SAdam Litke */
2586e4e574b7SAdam Litke needed += allocated;
2587a5516438SAndi Kleen h->resv_huge_pages += delta;
2588e4e574b7SAdam Litke ret = 0;
2589a9869b83SNaoya Horiguchi
259019fc3f0aSAdam Litke /* Free the needed pages to the hugetlb pool */
2591454a00c4SMatthew Wilcox (Oracle) list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
259219fc3f0aSAdam Litke if ((--needed) < 0)
259319fc3f0aSAdam Litke break;
2594b65a4edaSMike Kravetz /* Add the page to the hugetlb allocator */
2595454a00c4SMatthew Wilcox (Oracle) enqueue_hugetlb_folio(h, folio);
259619fc3f0aSAdam Litke }
259728073b02SHillf Danton free:
2598db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
259919fc3f0aSAdam Litke
2600b65a4edaSMike Kravetz /*
2601b65a4edaSMike Kravetz * Free unnecessary surplus pages to the buddy allocator.
2602454a00c4SMatthew Wilcox (Oracle) * Pages have no ref count, call free_huge_folio directly.
2603b65a4edaSMike Kravetz */
2604454a00c4SMatthew Wilcox (Oracle) list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
2605454a00c4SMatthew Wilcox (Oracle) free_huge_folio(folio);
2606db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
2607e4e574b7SAdam Litke
2608e4e574b7SAdam Litke return ret;
2609e4e574b7SAdam Litke }
2610e4e574b7SAdam Litke
2611e4e574b7SAdam Litke /*
2612e5bbc8a6SMike Kravetz * This routine has two main purposes:
2613e5bbc8a6SMike Kravetz * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2614e5bbc8a6SMike Kravetz * in unused_resv_pages. This corresponds to the prior adjustments made
2615e5bbc8a6SMike Kravetz * to the associated reservation map.
2616e5bbc8a6SMike Kravetz * 2) Free any unused surplus pages that may have been allocated to satisfy
2617e5bbc8a6SMike Kravetz * the reservation. As many as unused_resv_pages may be freed.
2618e4e574b7SAdam Litke */
2619a5516438SAndi Kleen static void return_unused_surplus_pages(struct hstate *h,
2620a5516438SAndi Kleen unsigned long unused_resv_pages)
2621e4e574b7SAdam Litke {
2622e4e574b7SAdam Litke unsigned long nr_pages;
262310c6ec49SMike Kravetz struct page *page;
262410c6ec49SMike Kravetz LIST_HEAD(page_list);
262510c6ec49SMike Kravetz
26269487ca60SMike Kravetz lockdep_assert_held(&hugetlb_lock);
262710c6ec49SMike Kravetz /* Uncommit the reservation */
262810c6ec49SMike Kravetz h->resv_huge_pages -= unused_resv_pages;
2629e4e574b7SAdam Litke
2630c0531714SNaoya Horiguchi if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2631e5bbc8a6SMike Kravetz goto out;
2632aa888a74SAndi Kleen
2633e5bbc8a6SMike Kravetz /*
2634e5bbc8a6SMike Kravetz * Part (or even all) of the reservation could have been backed
2635e5bbc8a6SMike Kravetz * by pre-allocated pages. Only free surplus pages.
2636e5bbc8a6SMike Kravetz */
2637a5516438SAndi Kleen nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2638e4e574b7SAdam Litke
2639685f3457SLee Schermerhorn /*
2640685f3457SLee Schermerhorn * We want to release as many surplus pages as possible, spread
26419b5e5d0fSLee Schermerhorn * evenly across all nodes with memory. Iterate across these nodes
26429b5e5d0fSLee Schermerhorn * until we can no longer free unreserved surplus pages. This occurs
26439b5e5d0fSLee Schermerhorn * when the nodes with surplus pages have no free pages.
264410c6ec49SMike Kravetz * remove_pool_huge_page() will balance the freed pages across the
26459b5e5d0fSLee Schermerhorn * on-line nodes with memory and will handle the hstate accounting.
2646685f3457SLee Schermerhorn */
2647685f3457SLee Schermerhorn while (nr_pages--) {
264810c6ec49SMike Kravetz page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
264910c6ec49SMike Kravetz if (!page)
2650e5bbc8a6SMike Kravetz goto out;
265110c6ec49SMike Kravetz
265210c6ec49SMike Kravetz list_add(&page->lru, &page_list);
2653e4e574b7SAdam Litke }
2654e5bbc8a6SMike Kravetz
2655e5bbc8a6SMike Kravetz out:
2656db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
265710c6ec49SMike Kravetz update_and_free_pages_bulk(h, &page_list);
2658db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
2659e4e574b7SAdam Litke }
2660e4e574b7SAdam Litke
26615e911373SMike Kravetz
2662c37f9fb1SAndy Whitcroft /*
2663feba16e2SMike Kravetz * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
26645e911373SMike Kravetz * are used by the huge page allocation routines to manage reservations.
2665cf3ad20bSMike Kravetz *
2666cf3ad20bSMike Kravetz * vma_needs_reservation is called to determine if the huge page at addr
2667cf3ad20bSMike Kravetz * within the vma has an associated reservation. If a reservation is
2668cf3ad20bSMike Kravetz * needed, the value 1 is returned. The caller is then responsible for
2669cf3ad20bSMike Kravetz * managing the global reservation and subpool usage counts. After
2670cf3ad20bSMike Kravetz * the huge page has been allocated, vma_commit_reservation is called
2671feba16e2SMike Kravetz * to add the page to the reservation map. If the page allocation fails,
2672feba16e2SMike Kravetz * the reservation must be ended instead of committed. vma_end_reservation
2673feba16e2SMike Kravetz * is called in such cases.
2674cf3ad20bSMike Kravetz *
2675cf3ad20bSMike Kravetz * In the normal case, vma_commit_reservation returns the same value
2676cf3ad20bSMike Kravetz * as the preceding vma_needs_reservation call. The only time this
2677cf3ad20bSMike Kravetz * is not the case is if a reserve map was changed between calls. It
2678cf3ad20bSMike Kravetz * is the responsibility of the caller to notice the difference and
2679cf3ad20bSMike Kravetz * take appropriate action.
268096b96a96SMike Kravetz *
268196b96a96SMike Kravetz * vma_add_reservation is used in error paths where a reservation must
268296b96a96SMike Kravetz * be restored when a newly allocated huge page must be freed. It is
268396b96a96SMike Kravetz * to be called after calling vma_needs_reservation to determine if a
268496b96a96SMike Kravetz * reservation exists.
2685846be085SMike Kravetz *
2686846be085SMike Kravetz * vma_del_reservation is used in error paths where an entry in the reserve
2687846be085SMike Kravetz * map was created during huge page allocation and must be removed. It is to
2688846be085SMike Kravetz * be called after calling vma_needs_reservation to determine if a reservation
2689846be085SMike Kravetz * exists.
2690c37f9fb1SAndy Whitcroft */
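/*
 * Typical call sequence (illustrative sketch), as used by
 * alloc_hugetlb_folio():
 *
 *	chg = vma_needs_reservation(h, vma, addr);   // 0: reservation exists
 *	folio = ...allocate a huge page...;
 *	if (!folio)
 *		vma_end_reservation(h, vma, addr);    // abort the region_chg
 *	else
 *		vma_commit_reservation(h, vma, addr); // finalize the map entry
 *
 * vma_add_reservation() and vma_del_reservation() are only used on error
 * paths, via restore_reserve_on_error().
 */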
26915e911373SMike Kravetz enum vma_resv_mode {
26925e911373SMike Kravetz VMA_NEEDS_RESV,
26935e911373SMike Kravetz VMA_COMMIT_RESV,
2694feba16e2SMike Kravetz VMA_END_RESV,
269596b96a96SMike Kravetz VMA_ADD_RESV,
2696846be085SMike Kravetz VMA_DEL_RESV,
26975e911373SMike Kravetz };
2698cf3ad20bSMike Kravetz static long __vma_reservation_common(struct hstate *h,
2699cf3ad20bSMike Kravetz struct vm_area_struct *vma, unsigned long addr,
27005e911373SMike Kravetz enum vma_resv_mode mode)
2701c37f9fb1SAndy Whitcroft {
27024e35f483SJoonsoo Kim struct resv_map *resv;
27034e35f483SJoonsoo Kim pgoff_t idx;
2704cf3ad20bSMike Kravetz long ret;
27050db9d74eSMina Almasry long dummy_out_regions_needed;
2706c37f9fb1SAndy Whitcroft
27074e35f483SJoonsoo Kim resv = vma_resv_map(vma);
27084e35f483SJoonsoo Kim if (!resv)
2709c37f9fb1SAndy Whitcroft return 1;
2710c37f9fb1SAndy Whitcroft
27114e35f483SJoonsoo Kim idx = vma_hugecache_offset(h, vma, addr);
27125e911373SMike Kravetz switch (mode) {
27135e911373SMike Kravetz case VMA_NEEDS_RESV:
27140db9d74eSMina Almasry ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
27150db9d74eSMina Almasry /* We assume that vma_reservation_* routines always operate on
27160db9d74eSMina Almasry * 1 page, and that adding a 1 page entry to the resv map can only
27170db9d74eSMina Almasry * ever require 1 region.
27180db9d74eSMina Almasry */
27190db9d74eSMina Almasry VM_BUG_ON(dummy_out_regions_needed != 1);
27205e911373SMike Kravetz break;
27215e911373SMike Kravetz case VMA_COMMIT_RESV:
2722075a61d0SMina Almasry ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
27230db9d74eSMina Almasry /* region_add calls of range 1 should never fail. */
27240db9d74eSMina Almasry VM_BUG_ON(ret < 0);
27255e911373SMike Kravetz break;
2726feba16e2SMike Kravetz case VMA_END_RESV:
27270db9d74eSMina Almasry region_abort(resv, idx, idx + 1, 1);
27285e911373SMike Kravetz ret = 0;
27295e911373SMike Kravetz break;
273096b96a96SMike Kravetz case VMA_ADD_RESV:
27310db9d74eSMina Almasry if (vma->vm_flags & VM_MAYSHARE) {
2732075a61d0SMina Almasry ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
27330db9d74eSMina Almasry /* region_add calls of range 1 should never fail. */
27340db9d74eSMina Almasry VM_BUG_ON(ret < 0);
27350db9d74eSMina Almasry } else {
27360db9d74eSMina Almasry region_abort(resv, idx, idx + 1, 1);
273796b96a96SMike Kravetz ret = region_del(resv, idx, idx + 1);
273896b96a96SMike Kravetz }
273996b96a96SMike Kravetz break;
2740846be085SMike Kravetz case VMA_DEL_RESV:
2741846be085SMike Kravetz if (vma->vm_flags & VM_MAYSHARE) {
2742846be085SMike Kravetz region_abort(resv, idx, idx + 1, 1);
2743846be085SMike Kravetz ret = region_del(resv, idx, idx + 1);
2744846be085SMike Kravetz } else {
2745846be085SMike Kravetz ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2746846be085SMike Kravetz /* region_add calls of range 1 should never fail. */
2747846be085SMike Kravetz VM_BUG_ON(ret < 0);
2748846be085SMike Kravetz }
2749846be085SMike Kravetz break;
27505e911373SMike Kravetz default:
27515e911373SMike Kravetz BUG();
27525e911373SMike Kravetz }
275384afd99bSAndy Whitcroft
2754846be085SMike Kravetz if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2755cf3ad20bSMike Kravetz return ret;
275667961f9dSMike Kravetz /*
2757bf3d12b9SMiaohe Lin * We know private mapping must have HPAGE_RESV_OWNER set.
2758bf3d12b9SMiaohe Lin *
275967961f9dSMike Kravetz * In most cases, reserves always exist for private mappings.
276067961f9dSMike Kravetz * However, a file associated with the mapping could have been
276167961f9dSMike Kravetz * hole punched or truncated after reserves were consumed; a
276267961f9dSMike Kravetz * subsequent fault on such a range will not use reserves.
276367961f9dSMike Kravetz * Subtle - The reserve map for private mappings has the
276467961f9dSMike Kravetz * opposite meaning than that of shared mappings. If NO
276567961f9dSMike Kravetz * entry is in the reserve map, it means a reservation exists.
276667961f9dSMike Kravetz * If an entry exists in the reserve map, it means the
276767961f9dSMike Kravetz * reservation has already been consumed. As a result, the
276867961f9dSMike Kravetz * return value of this routine is the opposite of the
276967961f9dSMike Kravetz * value returned from reserve map manipulation routines above.
277067961f9dSMike Kravetz */
2771bf3d12b9SMiaohe Lin if (ret > 0)
277267961f9dSMike Kravetz return 0;
2773bf3d12b9SMiaohe Lin if (ret == 0)
277467961f9dSMike Kravetz return 1;
2775bf3d12b9SMiaohe Lin return ret;
277684afd99bSAndy Whitcroft }
2777cf3ad20bSMike Kravetz
2778cf3ad20bSMike Kravetz static long vma_needs_reservation(struct hstate *h,
2779a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long addr)
2780c37f9fb1SAndy Whitcroft {
27815e911373SMike Kravetz return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2782cf3ad20bSMike Kravetz }
2783c37f9fb1SAndy Whitcroft
2784cf3ad20bSMike Kravetz static long vma_commit_reservation(struct hstate *h,
2785cf3ad20bSMike Kravetz struct vm_area_struct *vma, unsigned long addr)
2786cf3ad20bSMike Kravetz {
27875e911373SMike Kravetz return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
27885e911373SMike Kravetz }
27895e911373SMike Kravetz
2790feba16e2SMike Kravetz static void vma_end_reservation(struct hstate *h,
27915e911373SMike Kravetz struct vm_area_struct *vma, unsigned long addr)
27925e911373SMike Kravetz {
2793feba16e2SMike Kravetz (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2794c37f9fb1SAndy Whitcroft }
2795c37f9fb1SAndy Whitcroft
279696b96a96SMike Kravetz static long vma_add_reservation(struct hstate *h,
279796b96a96SMike Kravetz struct vm_area_struct *vma, unsigned long addr)
279896b96a96SMike Kravetz {
279996b96a96SMike Kravetz return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
280096b96a96SMike Kravetz }
280196b96a96SMike Kravetz
2802846be085SMike Kravetz static long vma_del_reservation(struct hstate *h,
2803846be085SMike Kravetz struct vm_area_struct *vma, unsigned long addr)
280496b96a96SMike Kravetz {
2805846be085SMike Kravetz return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2806846be085SMike Kravetz }
2807846be085SMike Kravetz
2808846be085SMike Kravetz /*
2809846be085SMike Kravetz * This routine is called to restore reservation information on error paths.
2810d0ce0e47SSidhartha Kumar * It should ONLY be called for folios allocated via alloc_hugetlb_folio(),
2811d0ce0e47SSidhartha Kumar * and the hugetlb mutex should remain held when calling this routine.
2812846be085SMike Kravetz *
2813846be085SMike Kravetz * It handles two specific cases:
2814d2d7bb44SSidhartha Kumar * 1) A reservation was in place and the folio consumed the reservation.
2815d2d7bb44SSidhartha Kumar * hugetlb_restore_reserve is set in the folio.
2816d2d7bb44SSidhartha Kumar * 2) No reservation was in place for the page, so hugetlb_restore_reserve is
2817d0ce0e47SSidhartha Kumar * not set. However, alloc_hugetlb_folio always updates the reserve map.
2818846be085SMike Kravetz *
2819454a00c4SMatthew Wilcox (Oracle) * In case 1, free_huge_folio later in the error path will increment the
2820454a00c4SMatthew Wilcox (Oracle) * global reserve count. But, free_huge_folio does not have enough context
2821846be085SMike Kravetz * to adjust the reservation map. This case deals primarily with private
2822846be085SMike Kravetz * mappings. Adjust the reserve map here to be consistent with global
2823454a00c4SMatthew Wilcox (Oracle) * reserve count adjustments to be made by free_huge_folio. Make sure the
2824846be085SMike Kravetz * reserve map indicates there is a reservation present.
2825846be085SMike Kravetz *
2826d0ce0e47SSidhartha Kumar * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
2827846be085SMike Kravetz */
2828846be085SMike Kravetz void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2829d2d7bb44SSidhartha Kumar unsigned long address, struct folio *folio)
2830846be085SMike Kravetz {
283196b96a96SMike Kravetz long rc = vma_needs_reservation(h, vma, address);
283296b96a96SMike Kravetz
28330ffdc38eSSidhartha Kumar if (folio_test_hugetlb_restore_reserve(folio)) {
2834846be085SMike Kravetz if (unlikely(rc < 0))
283596b96a96SMike Kravetz /*
283696b96a96SMike Kravetz * Rare out of memory condition in reserve map
28370ffdc38eSSidhartha Kumar * manipulation. Clear hugetlb_restore_reserve so
28380ffdc38eSSidhartha Kumar * that global reserve count will not be incremented
2839454a00c4SMatthew Wilcox (Oracle) * by free_huge_folio. This will make it appear
28400ffdc38eSSidhartha Kumar * as though the reservation for this folio was
284196b96a96SMike Kravetz * consumed. This may prevent the task from
28420ffdc38eSSidhartha Kumar * faulting in the folio at a later time. This
284396b96a96SMike Kravetz * is better than inconsistent global huge page
284496b96a96SMike Kravetz * accounting of reserve counts.
284596b96a96SMike Kravetz */
28460ffdc38eSSidhartha Kumar folio_clear_hugetlb_restore_reserve(folio);
2847846be085SMike Kravetz else if (rc)
2848846be085SMike Kravetz (void)vma_add_reservation(h, vma, address);
2849846be085SMike Kravetz else
2850846be085SMike Kravetz vma_end_reservation(h, vma, address);
2851846be085SMike Kravetz } else {
2852846be085SMike Kravetz if (!rc) {
285396b96a96SMike Kravetz /*
2854846be085SMike Kravetz * This indicates there is an entry in the reserve map
2855d0ce0e47SSidhartha Kumar * not added by alloc_hugetlb_folio. We know it was added
2856d0ce0e47SSidhartha Kumar * before the alloc_hugetlb_folio call, otherwise
28570ffdc38eSSidhartha Kumar * hugetlb_restore_reserve would be set on the folio.
2858846be085SMike Kravetz * Remove the entry so that a subsequent allocation
2859846be085SMike Kravetz * does not consume a reservation.
286096b96a96SMike Kravetz */
2861846be085SMike Kravetz rc = vma_del_reservation(h, vma, address);
2862846be085SMike Kravetz if (rc < 0)
2863846be085SMike Kravetz /*
2864846be085SMike Kravetz * VERY rare out of memory condition. Since
2865846be085SMike Kravetz * we can not delete the entry, set
28660ffdc38eSSidhartha Kumar * hugetlb_restore_reserve so that the reserve
28670ffdc38eSSidhartha Kumar * count will be incremented when the folio
2868846be085SMike Kravetz * is freed. This reserve will be consumed
2869846be085SMike Kravetz * on a subsequent allocation.
2870846be085SMike Kravetz */
28710ffdc38eSSidhartha Kumar folio_set_hugetlb_restore_reserve(folio);
2872846be085SMike Kravetz } else if (rc < 0) {
2873846be085SMike Kravetz /*
2874846be085SMike Kravetz * Rare out of memory condition from
2875846be085SMike Kravetz * vma_needs_reservation call. Memory allocation is
2876846be085SMike Kravetz * only attempted if a new entry is needed. Therefore,
2877846be085SMike Kravetz * this implies there is not an entry in the
2878846be085SMike Kravetz * reserve map.
2879846be085SMike Kravetz *
2880846be085SMike Kravetz * For shared mappings, no entry in the map indicates
2881846be085SMike Kravetz * no reservation. We are done.
2882846be085SMike Kravetz */
2883846be085SMike Kravetz if (!(vma->vm_flags & VM_MAYSHARE))
2884846be085SMike Kravetz /*
2885846be085SMike Kravetz * For private mappings, no entry indicates
2886846be085SMike Kravetz * a reservation is present. Since we can
28870ffdc38eSSidhartha Kumar * not add an entry, set hugetlb_restore_reserve
28880ffdc38eSSidhartha Kumar * on the folio so reserve count will be
2889846be085SMike Kravetz * incremented when freed. This reserve will
2890846be085SMike Kravetz * be consumed on a subsequent allocation.
2891846be085SMike Kravetz */
28920ffdc38eSSidhartha Kumar folio_set_hugetlb_restore_reserve(folio);
289396b96a96SMike Kravetz } else
2894846be085SMike Kravetz /*
2895846be085SMike Kravetz * No reservation present, do nothing
2896846be085SMike Kravetz */
289796b96a96SMike Kravetz vma_end_reservation(h, vma, address);
289896b96a96SMike Kravetz }
289996b96a96SMike Kravetz }
290096b96a96SMike Kravetz
2901369fa227SOscar Salvador /*
290219fc1a7eSSidhartha Kumar * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
290319fc1a7eSSidhartha Kumar * the old one
2904369fa227SOscar Salvador * @h: struct hstate old page belongs to
290519fc1a7eSSidhartha Kumar * @old_folio: Old folio to dissolve
2906ae37c7ffSOscar Salvador * @list: List to isolate the page in case we need to
2907369fa227SOscar Salvador * Returns 0 on success, otherwise negated error.
2908369fa227SOscar Salvador */
290919fc1a7eSSidhartha Kumar static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
291019fc1a7eSSidhartha Kumar struct folio *old_folio, struct list_head *list)
2911369fa227SOscar Salvador {
2912369fa227SOscar Salvador gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2913de656ed3SSidhartha Kumar int nid = folio_nid(old_folio);
2914de656ed3SSidhartha Kumar struct folio *new_folio;
2915369fa227SOscar Salvador int ret = 0;
2916369fa227SOscar Salvador
2917369fa227SOscar Salvador /*
291819fc1a7eSSidhartha Kumar * Before dissolving the folio, we need to allocate a new one for the
291919fc1a7eSSidhartha Kumar * pool to remain stable. Here, we allocate the folio and 'prep' it
2920f41f2ed4SMuchun Song * by doing everything but actually updating counters and adding to
2921f41f2ed4SMuchun Song * the pool. This simplifies things and lets us do most of the processing
2922f41f2ed4SMuchun Song * under the lock.
2923369fa227SOscar Salvador */
292419fc1a7eSSidhartha Kumar new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL);
292519fc1a7eSSidhartha Kumar if (!new_folio)
2926369fa227SOscar Salvador return -ENOMEM;
2927de656ed3SSidhartha Kumar __prep_new_hugetlb_folio(h, new_folio);
2928369fa227SOscar Salvador
2929369fa227SOscar Salvador retry:
2930369fa227SOscar Salvador spin_lock_irq(&hugetlb_lock);
2931de656ed3SSidhartha Kumar if (!folio_test_hugetlb(old_folio)) {
2932369fa227SOscar Salvador /*
293319fc1a7eSSidhartha Kumar * Freed from under us. Drop new_folio too.
2934369fa227SOscar Salvador */
2935369fa227SOscar Salvador goto free_new;
2936de656ed3SSidhartha Kumar } else if (folio_ref_count(old_folio)) {
29379747b9e9SBaolin Wang bool isolated;
29389747b9e9SBaolin Wang
2939369fa227SOscar Salvador /*
294019fc1a7eSSidhartha Kumar * Someone has grabbed the folio, try to isolate it here.
2941ae37c7ffSOscar Salvador * Fail with -EBUSY if not possible.
2942369fa227SOscar Salvador */
2943ae37c7ffSOscar Salvador spin_unlock_irq(&hugetlb_lock);
29449747b9e9SBaolin Wang isolated = isolate_hugetlb(old_folio, list);
29459747b9e9SBaolin Wang ret = isolated ? 0 : -EBUSY;
2946ae37c7ffSOscar Salvador spin_lock_irq(&hugetlb_lock);
2947369fa227SOscar Salvador goto free_new;
2948de656ed3SSidhartha Kumar } else if (!folio_test_hugetlb_freed(old_folio)) {
2949369fa227SOscar Salvador /*
295019fc1a7eSSidhartha Kumar * Folio's refcount is 0 but it has not been enqueued in the
2951369fa227SOscar Salvador * freelist yet. Race window is small, so we can succeed here if
2952369fa227SOscar Salvador * we retry.
2953369fa227SOscar Salvador */
2954369fa227SOscar Salvador spin_unlock_irq(&hugetlb_lock);
2955369fa227SOscar Salvador cond_resched();
2956369fa227SOscar Salvador goto retry;
2957369fa227SOscar Salvador } else {
2958369fa227SOscar Salvador /*
295919fc1a7eSSidhartha Kumar * Ok, old_folio is still a genuine free hugepage. Remove it from
2960369fa227SOscar Salvador * the freelist and decrease the counters. These will be
2961369fa227SOscar Salvador * incremented again when calling __prep_account_new_huge_page()
2962240d67a8SSidhartha Kumar * and enqueue_hugetlb_folio() for new_folio. The counters will
2963240d67a8SSidhartha Kumar * remain stable since this happens under the lock.
2964369fa227SOscar Salvador */
2965cfd5082bSSidhartha Kumar remove_hugetlb_folio(h, old_folio, false);
2966369fa227SOscar Salvador
2967369fa227SOscar Salvador /*
296819fc1a7eSSidhartha Kumar * Ref count on new_folio is already zero as it was dropped
2969b65a4edaSMike Kravetz * earlier. It can be directly added to the pool free list.
2970369fa227SOscar Salvador */
2971369fa227SOscar Salvador __prep_account_new_huge_page(h, nid);
2972240d67a8SSidhartha Kumar enqueue_hugetlb_folio(h, new_folio);
2973369fa227SOscar Salvador
2974369fa227SOscar Salvador /*
297519fc1a7eSSidhartha Kumar * Folio has been replaced, we can safely free the old one.
2976369fa227SOscar Salvador */
2977369fa227SOscar Salvador spin_unlock_irq(&hugetlb_lock);
2978d6ef19e2SSidhartha Kumar update_and_free_hugetlb_folio(h, old_folio, false);
2979369fa227SOscar Salvador }
2980369fa227SOscar Salvador
2981369fa227SOscar Salvador return ret;
2982369fa227SOscar Salvador
2983369fa227SOscar Salvador free_new:
2984369fa227SOscar Salvador spin_unlock_irq(&hugetlb_lock);
298519fc1a7eSSidhartha Kumar /* Folio has a zero ref count, but needs a ref to be freed */
2986de656ed3SSidhartha Kumar folio_ref_unfreeze(new_folio, 1);
2987d6ef19e2SSidhartha Kumar update_and_free_hugetlb_folio(h, new_folio, false);
2988369fa227SOscar Salvador
2989369fa227SOscar Salvador return ret;
2990369fa227SOscar Salvador }
2991369fa227SOscar Salvador
2992ae37c7ffSOscar Salvador int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
2993369fa227SOscar Salvador {
2994369fa227SOscar Salvador struct hstate *h;
2995d5e33bd8SSidhartha Kumar struct folio *folio = page_folio(page);
2996ae37c7ffSOscar Salvador int ret = -EBUSY;
2997369fa227SOscar Salvador
2998369fa227SOscar Salvador /*
2999369fa227SOscar Salvador * The page might have been dissolved from under our feet, so make sure
3000369fa227SOscar Salvador * to carefully check the state under the lock.
3001369fa227SOscar Salvador * Return success when racing as if we dissolved the page ourselves.
3002369fa227SOscar Salvador */
3003369fa227SOscar Salvador spin_lock_irq(&hugetlb_lock);
3004d5e33bd8SSidhartha Kumar if (folio_test_hugetlb(folio)) {
3005d5e33bd8SSidhartha Kumar h = folio_hstate(folio);
3006369fa227SOscar Salvador } else {
3007369fa227SOscar Salvador spin_unlock_irq(&hugetlb_lock);
3008369fa227SOscar Salvador return 0;
3009369fa227SOscar Salvador }
3010369fa227SOscar Salvador spin_unlock_irq(&hugetlb_lock);
3011369fa227SOscar Salvador
3012369fa227SOscar Salvador /*
3013369fa227SOscar Salvador * Fence off gigantic pages as there is a cyclic dependency between
3014369fa227SOscar Salvador * alloc_contig_range and them. Return -ENOMEM as this has the effect
3015369fa227SOscar Salvador * of bailing out right away without further retrying.
3016369fa227SOscar Salvador */
3017369fa227SOscar Salvador if (hstate_is_gigantic(h))
3018369fa227SOscar Salvador return -ENOMEM;
3019369fa227SOscar Salvador
30209747b9e9SBaolin Wang if (folio_ref_count(folio) && isolate_hugetlb(folio, list))
3021ae37c7ffSOscar Salvador ret = 0;
3022d5e33bd8SSidhartha Kumar else if (!folio_ref_count(folio))
302319fc1a7eSSidhartha Kumar ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
3024ae37c7ffSOscar Salvador
3025ae37c7ffSOscar Salvador return ret;
3026369fa227SOscar Salvador }
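/*
 * Caller sketch (illustrative): the alloc_contig_range()/migration path calls
 * this for any hugetlb page found in the requested range:
 *
 *	- page still referenced  -> try to isolate it for migration
 *				    (0 on success, -EBUSY otherwise)
 *	- free hugetlb page	 -> allocate a replacement elsewhere, then
 *				    dissolve the old one (0, -ENOMEM or -EBUSY)
 *	- gigantic page		 -> refused with -ENOMEM to avoid recursing
 *				    into alloc_contig_range
 */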
3027369fa227SOscar Salvador
3028d0ce0e47SSidhartha Kumar struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
302904f2cbe3SMel Gorman unsigned long addr, int avoid_reserve)
3030348ea204SAdam Litke {
303190481622SDavid Gibson struct hugepage_subpool *spool = subpool_vma(vma);
3032a5516438SAndi Kleen struct hstate *h = hstate_vma(vma);
3033d4ab0316SSidhartha Kumar struct folio *folio;
3034d85f69b0SMike Kravetz long map_chg, map_commit;
3035d85f69b0SMike Kravetz long gbl_chg;
30366d76dcf4SAneesh Kumar K.V int ret, idx;
3037d0ce0e47SSidhartha Kumar struct hugetlb_cgroup *h_cg = NULL;
303808cf9fafSMina Almasry bool deferred_reserve;
30392fc39cecSAdam Litke
30406d76dcf4SAneesh Kumar K.V idx = hstate_index(h);
3041a1e78772SMel Gorman /*
3042d85f69b0SMike Kravetz * Examine the region/reserve map to determine if the process
3043d85f69b0SMike Kravetz * has a reservation for the page to be allocated. A return
3044d85f69b0SMike Kravetz * code of zero indicates a reservation exists (no change).
3045a1e78772SMel Gorman */
3046d85f69b0SMike Kravetz map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
3047d85f69b0SMike Kravetz if (map_chg < 0)
304876dcee75SAneesh Kumar K.V return ERR_PTR(-ENOMEM);
3049d85f69b0SMike Kravetz
3050d85f69b0SMike Kravetz /*
3051d85f69b0SMike Kravetz * Processes that did not create the mapping will have no
3052d85f69b0SMike Kravetz * reserves as indicated by the region/reserve map. Check
3053d85f69b0SMike Kravetz * that the allocation will not exceed the subpool limit.
3054d85f69b0SMike Kravetz * Allocations for MAP_NORESERVE mappings also need to be
3055d85f69b0SMike Kravetz * checked against any subpool limit.
3056d85f69b0SMike Kravetz */
3057d85f69b0SMike Kravetz if (map_chg || avoid_reserve) {
3058d85f69b0SMike Kravetz gbl_chg = hugepage_subpool_get_pages(spool, 1);
3059d85f69b0SMike Kravetz if (gbl_chg < 0) {
3060feba16e2SMike Kravetz vma_end_reservation(h, vma, addr);
306176dcee75SAneesh Kumar K.V return ERR_PTR(-ENOSPC);
30625e911373SMike Kravetz }
306390d8b7e6SAdam Litke
3064d85f69b0SMike Kravetz /*
3065d85f69b0SMike Kravetz * Even though there was no reservation in the region/reserve
3066d85f69b0SMike Kravetz * map, there could be reservations associated with the
3067d85f69b0SMike Kravetz * subpool that can be used. This would be indicated if the
3068d85f69b0SMike Kravetz * return value of hugepage_subpool_get_pages() is zero.
3069d85f69b0SMike Kravetz * However, if avoid_reserve is specified we still avoid even
3070d85f69b0SMike Kravetz * the subpool reservations.
3071d85f69b0SMike Kravetz */
3072d85f69b0SMike Kravetz if (avoid_reserve)
3073d85f69b0SMike Kravetz gbl_chg = 1;
3074d85f69b0SMike Kravetz }
3075d85f69b0SMike Kravetz
307608cf9fafSMina Almasry /* If this allocation is not consuming a reservation, charge it now.
307708cf9fafSMina Almasry */
30786501fe5fSMiaohe Lin deferred_reserve = map_chg || avoid_reserve;
307908cf9fafSMina Almasry if (deferred_reserve) {
308008cf9fafSMina Almasry ret = hugetlb_cgroup_charge_cgroup_rsvd(
308108cf9fafSMina Almasry idx, pages_per_huge_page(h), &h_cg);
30828f34af6fSJianyu Zhan if (ret)
30838f34af6fSJianyu Zhan goto out_subpool_put;
308408cf9fafSMina Almasry }
308508cf9fafSMina Almasry
308608cf9fafSMina Almasry ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
308708cf9fafSMina Almasry if (ret)
308808cf9fafSMina Almasry goto out_uncharge_cgroup_reservation;
30898f34af6fSJianyu Zhan
3090db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
3091d85f69b0SMike Kravetz /*
3092d85f69b0SMike Kravetz * gbl_chg is passed to indicate whether or not a page must be taken
3093d85f69b0SMike Kravetz * from the global free pool (global change). gbl_chg == 0 indicates
3094d85f69b0SMike Kravetz * a reservation exists for the allocation.
3095d85f69b0SMike Kravetz */
3096ff7d853bSSidhartha Kumar folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
3097ff7d853bSSidhartha Kumar if (!folio) {
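/*
 * No folio available in the free pool: drop the lock and try to
 * allocate a new one from the buddy allocator, respecting the VMA's
 * memory policy.
 */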
3098db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
3099ff7d853bSSidhartha Kumar folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
3100ff7d853bSSidhartha Kumar if (!folio)
31018f34af6fSJianyu Zhan goto out_uncharge_cgroup;
310212df140fSRik van Riel spin_lock_irq(&hugetlb_lock);
3103a88c7695SNaoya Horiguchi if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
3104ff7d853bSSidhartha Kumar folio_set_hugetlb_restore_reserve(folio);
3105a88c7695SNaoya Horiguchi h->resv_huge_pages--;
3106a88c7695SNaoya Horiguchi }
3107ff7d853bSSidhartha Kumar list_add(&folio->lru, &h->hugepage_activelist);
3108ff7d853bSSidhartha Kumar folio_ref_unfreeze(folio, 1);
310981a6fcaeSJoonsoo Kim /* Fall through */
3110a1e78772SMel Gorman }
3111ff7d853bSSidhartha Kumar
3112ff7d853bSSidhartha Kumar hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
311308cf9fafSMina Almasry /* If allocation is not consuming a reservation, also store the
311408cf9fafSMina Almasry * hugetlb_cgroup pointer on the page.
311508cf9fafSMina Almasry */
311608cf9fafSMina Almasry if (deferred_reserve) {
311708cf9fafSMina Almasry hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
3118ff7d853bSSidhartha Kumar h_cg, folio);
311908cf9fafSMina Almasry }
312008cf9fafSMina Almasry
3121db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
3122a1e78772SMel Gorman
3123ff7d853bSSidhartha Kumar hugetlb_set_folio_subpool(folio, spool);
3124a1e78772SMel Gorman
3125d85f69b0SMike Kravetz map_commit = vma_commit_reservation(h, vma, addr);
3126d85f69b0SMike Kravetz if (unlikely(map_chg > map_commit)) {
312733039678SMike Kravetz /*
312833039678SMike Kravetz * The page was added to the reservation map between
312933039678SMike Kravetz * vma_needs_reservation and vma_commit_reservation.
313033039678SMike Kravetz * This indicates a race with hugetlb_reserve_pages.
313133039678SMike Kravetz * Adjust for the subpool count incremented above AND
313233039678SMike Kravetz * in hugetlb_reserve_pages for the same page. Also,
313333039678SMike Kravetz * the reservation count added in hugetlb_reserve_pages
313433039678SMike Kravetz * no longer applies.
313533039678SMike Kravetz */
313633039678SMike Kravetz long rsv_adjust;
313733039678SMike Kravetz
313833039678SMike Kravetz rsv_adjust = hugepage_subpool_put_pages(spool, 1);
313933039678SMike Kravetz hugetlb_acct_memory(h, -rsv_adjust);
3140f6c5d21dSPeter Xu if (deferred_reserve) {
3141f6c5d21dSPeter Xu spin_lock_irq(&hugetlb_lock);
3142d4ab0316SSidhartha Kumar hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
3143d4ab0316SSidhartha Kumar pages_per_huge_page(h), folio);
3144f6c5d21dSPeter Xu spin_unlock_irq(&hugetlb_lock);
3145f6c5d21dSPeter Xu }
314633039678SMike Kravetz }
3147d0ce0e47SSidhartha Kumar return folio;
31488f34af6fSJianyu Zhan
31498f34af6fSJianyu Zhan out_uncharge_cgroup:
31508f34af6fSJianyu Zhan hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
315108cf9fafSMina Almasry out_uncharge_cgroup_reservation:
315208cf9fafSMina Almasry if (deferred_reserve)
315308cf9fafSMina Almasry hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
315408cf9fafSMina Almasry h_cg);
31558f34af6fSJianyu Zhan out_subpool_put:
3156d85f69b0SMike Kravetz if (map_chg || avoid_reserve)
31578f34af6fSJianyu Zhan hugepage_subpool_put_pages(spool, 1);
3158feba16e2SMike Kravetz vma_end_reservation(h, vma, addr);
31598f34af6fSJianyu Zhan return ERR_PTR(-ENOSPC);
3160b45b5bd6SDavid Gibson }
3161b45b5bd6SDavid Gibson
3162b5389086SZhenguo Yao int alloc_bootmem_huge_page(struct hstate *h, int nid)
3163e24a1307SAneesh Kumar K.V __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
__alloc_bootmem_huge_page(struct hstate * h,int nid)3164b5389086SZhenguo Yao int __alloc_bootmem_huge_page(struct hstate *h, int nid)
3165aa888a74SAndi Kleen {
3166b5389086SZhenguo Yao struct huge_bootmem_page *m = NULL; /* initialize for clang */
3167b2261026SJoonsoo Kim int nr_nodes, node;
3168aa888a74SAndi Kleen
3169b5389086SZhenguo Yao /* do node specific alloc */
3170b5389086SZhenguo Yao if (nid != NUMA_NO_NODE) {
3171b5389086SZhenguo Yao m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
3172b5389086SZhenguo Yao 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3173b5389086SZhenguo Yao if (!m)
3174b5389086SZhenguo Yao return 0;
3175b5389086SZhenguo Yao goto found;
3176b5389086SZhenguo Yao }
3177b5389086SZhenguo Yao /* allocate from next node when distributing huge pages */
3178b2261026SJoonsoo Kim for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
3179b5389086SZhenguo Yao m = memblock_alloc_try_nid_raw(
31808b89a116SGrygorii Strashko huge_page_size(h), huge_page_size(h),
318197ad1087SMike Rapoport 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
3182aa888a74SAndi Kleen /*
3183aa888a74SAndi Kleen * Use the beginning of the huge page to store the
3184aa888a74SAndi Kleen * huge_bootmem_page struct (until gather_bootmem
3185aa888a74SAndi Kleen * puts them into the mem_map).
3186aa888a74SAndi Kleen */
3187b5389086SZhenguo Yao if (!m)
3188b5389086SZhenguo Yao return 0;
3189aa888a74SAndi Kleen goto found;
3190aa888a74SAndi Kleen }
3191aa888a74SAndi Kleen
3192aa888a74SAndi Kleen found:
3193aa888a74SAndi Kleen /* Put them into a private list first because mem_map is not up yet */
3194330d6e48SCannon Matthews INIT_LIST_HEAD(&m->list);
3195aa888a74SAndi Kleen list_add(&m->list, &huge_boot_pages);
3196aa888a74SAndi Kleen m->hstate = h;
3197aa888a74SAndi Kleen return 1;
3198aa888a74SAndi Kleen }
3199aa888a74SAndi Kleen
320048b8d744SMike Kravetz /*
320148b8d744SMike Kravetz * Put bootmem huge pages into the standard lists after mem_map is up.
320248b8d744SMike Kravetz * Note: This only applies to gigantic (order > MAX_ORDER) pages.
320348b8d744SMike Kravetz */
gather_bootmem_prealloc(void)3204aa888a74SAndi Kleen static void __init gather_bootmem_prealloc(void)
3205aa888a74SAndi Kleen {
3206aa888a74SAndi Kleen struct huge_bootmem_page *m;
3207aa888a74SAndi Kleen
3208aa888a74SAndi Kleen list_for_each_entry(m, &huge_boot_pages, list) {
320940d18ebfSMike Kravetz struct page *page = virt_to_page(m);
32107f325a8dSSidhartha Kumar struct folio *folio = page_folio(page);
3211aa888a74SAndi Kleen struct hstate *h = m->hstate;
3212ee8f248dSBecky Bruce
321348b8d744SMike Kravetz VM_BUG_ON(!hstate_is_gigantic(h));
3214d1c60955SSidhartha Kumar WARN_ON(folio_ref_count(folio) != 1);
3215d1c60955SSidhartha Kumar if (prep_compound_gigantic_folio(folio, huge_page_order(h))) {
3216d1c60955SSidhartha Kumar WARN_ON(folio_test_reserved(folio));
3217d1c60955SSidhartha Kumar prep_new_hugetlb_folio(h, folio, folio_nid(folio));
3218454a00c4SMatthew Wilcox (Oracle) free_huge_folio(folio); /* add to the hugepage allocator */
32197118fc29SMike Kravetz } else {
3220416d85edSMike Kravetz /* VERY unlikely inflated ref count on a tail page */
32217f325a8dSSidhartha Kumar free_gigantic_folio(folio, huge_page_order(h));
32227118fc29SMike Kravetz }
3223af0fb9dfSMichal Hocko
3224b0320c7bSRafael Aquini /*
322548b8d744SMike Kravetz * We need to restore the 'stolen' pages to totalram_pages
322648b8d744SMike Kravetz * in order to fix confusing memory reports from free(1) and
322748b8d744SMike Kravetz * other side-effects, like CommitLimit going negative.
3228b0320c7bSRafael Aquini */
3229c78a7f36SMiaohe Lin adjust_managed_page_count(page, pages_per_huge_page(h));
3230520495feSCannon Matthews cond_resched();
3231aa888a74SAndi Kleen }
3232aa888a74SAndi Kleen }
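/*
 * Allocate the requested number of boot-time huge pages for a single node,
 * as specified by the node-specific form of the hugepages= command line
 * option.
 */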
hugetlb_hstate_alloc_pages_onenode(struct hstate * h,int nid)3233b5389086SZhenguo Yao static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3234b5389086SZhenguo Yao {
3235b5389086SZhenguo Yao unsigned long i;
3236b5389086SZhenguo Yao char buf[32];
3237b5389086SZhenguo Yao
3238b5389086SZhenguo Yao for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3239b5389086SZhenguo Yao if (hstate_is_gigantic(h)) {
3240b5389086SZhenguo Yao if (!alloc_bootmem_huge_page(h, nid))
3241b5389086SZhenguo Yao break;
3242b5389086SZhenguo Yao } else {
324319fc1a7eSSidhartha Kumar struct folio *folio;
3244b5389086SZhenguo Yao gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3245b5389086SZhenguo Yao
324619fc1a7eSSidhartha Kumar folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
3247b5389086SZhenguo Yao &node_states[N_MEMORY], NULL);
324819fc1a7eSSidhartha Kumar if (!folio)
3249b5389086SZhenguo Yao break;
3250454a00c4SMatthew Wilcox (Oracle) free_huge_folio(folio); /* free it into the hugepage allocator */
3251b5389086SZhenguo Yao }
3252b5389086SZhenguo Yao cond_resched();
3253b5389086SZhenguo Yao }
3254b5389086SZhenguo Yao if (i == h->max_huge_pages_node[nid])
3255b5389086SZhenguo Yao return;
3256b5389086SZhenguo Yao
3257b5389086SZhenguo Yao string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3258b5389086SZhenguo Yao pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n",
3259b5389086SZhenguo Yao h->max_huge_pages_node[nid], buf, nid, i);
3260b5389086SZhenguo Yao h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3261b5389086SZhenguo Yao h->max_huge_pages_node[nid] = i;
3262b5389086SZhenguo Yao }
3263aa888a74SAndi Kleen
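/*
 * Allocate the boot-time huge page pool for @h: node-specific requests are
 * handled first; otherwise the allocation is balanced across all nodes
 * with memory.
 */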
hugetlb_hstate_alloc_pages(struct hstate * h)32648faa8b07SAndi Kleen static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
32651da177e4SLinus Torvalds {
32661da177e4SLinus Torvalds unsigned long i;
3267f60858f9SMike Kravetz nodemask_t *node_alloc_noretry;
3268b5389086SZhenguo Yao bool node_specific_alloc = false;
3269f60858f9SMike Kravetz
3270b5389086SZhenguo Yao /* skip gigantic hugepages allocation if hugetlb_cma enabled */
3271b5389086SZhenguo Yao if (hstate_is_gigantic(h) && hugetlb_cma_size) {
3272b5389086SZhenguo Yao pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3273b5389086SZhenguo Yao return;
3274b5389086SZhenguo Yao }
3275b5389086SZhenguo Yao
3276b5389086SZhenguo Yao /* do node specific alloc */
32770a7a0f6fSPeng Liu for_each_online_node(i) {
3278b5389086SZhenguo Yao if (h->max_huge_pages_node[i] > 0) {
3279b5389086SZhenguo Yao hugetlb_hstate_alloc_pages_onenode(h, i);
3280b5389086SZhenguo Yao node_specific_alloc = true;
3281b5389086SZhenguo Yao }
3282b5389086SZhenguo Yao }
3283b5389086SZhenguo Yao
3284b5389086SZhenguo Yao if (node_specific_alloc)
3285b5389086SZhenguo Yao return;
3286b5389086SZhenguo Yao
3287b5389086SZhenguo Yao /* below will do all node balanced alloc */
3288f60858f9SMike Kravetz if (!hstate_is_gigantic(h)) {
3289f60858f9SMike Kravetz /*
3290f60858f9SMike Kravetz * Bit mask controlling how hard we retry per-node allocations.
3291f60858f9SMike Kravetz * Ignore errors as lower level routines can deal with
3292f60858f9SMike Kravetz * node_alloc_noretry == NULL. If this kmalloc fails at boot
3293f60858f9SMike Kravetz * time, we are likely in bigger trouble.
3294f60858f9SMike Kravetz */
3295f60858f9SMike Kravetz node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
3296f60858f9SMike Kravetz GFP_KERNEL);
3297f60858f9SMike Kravetz } else {
3298f60858f9SMike Kravetz /* allocations done at boot time */
3299f60858f9SMike Kravetz node_alloc_noretry = NULL;
3300f60858f9SMike Kravetz }
3301f60858f9SMike Kravetz
3302f60858f9SMike Kravetz /* bit mask controlling how hard we retry per-node allocations */
3303f60858f9SMike Kravetz if (node_alloc_noretry)
3304f60858f9SMike Kravetz nodes_clear(*node_alloc_noretry);
33051da177e4SLinus Torvalds
3306e5ff2159SAndi Kleen for (i = 0; i < h->max_huge_pages; ++i) {
3307bae7f4aeSLuiz Capitulino if (hstate_is_gigantic(h)) {
3308b5389086SZhenguo Yao if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3309aa888a74SAndi Kleen break;
33100c397daeSMichal Hocko } else if (!alloc_pool_huge_page(h,
3311f60858f9SMike Kravetz &node_states[N_MEMORY],
3312f60858f9SMike Kravetz node_alloc_noretry))
33131da177e4SLinus Torvalds break;
331469ed779aSDavid Rientjes cond_resched();
33151da177e4SLinus Torvalds }
3316d715cf80SLiam R. Howlett if (i < h->max_huge_pages) {
3317d715cf80SLiam R. Howlett char buf[32];
3318d715cf80SLiam R. Howlett
3319c6247f72SMatthew Wilcox string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3320d715cf80SLiam R. Howlett pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
3321d715cf80SLiam R. Howlett h->max_huge_pages, buf, i);
33228faa8b07SAndi Kleen h->max_huge_pages = i;
3323e5ff2159SAndi Kleen }
3324f60858f9SMike Kravetz kfree(node_alloc_noretry);
3325d715cf80SLiam R. Howlett }
3326e5ff2159SAndi Kleen
hugetlb_init_hstates(void)3327e5ff2159SAndi Kleen static void __init hugetlb_init_hstates(void)
3328e5ff2159SAndi Kleen {
332979dfc695SMike Kravetz struct hstate *h, *h2;
3330e5ff2159SAndi Kleen
3331e5ff2159SAndi Kleen for_each_hstate(h) {
33328faa8b07SAndi Kleen /* oversize hugepages were init'ed in early boot */
3333bae7f4aeSLuiz Capitulino if (!hstate_is_gigantic(h))
33348faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(h);
333579dfc695SMike Kravetz
333679dfc695SMike Kravetz /*
333779dfc695SMike Kravetz * Set demote order for each hstate. Note that
333879dfc695SMike Kravetz * h->demote_order is initially 0.
333979dfc695SMike Kravetz * - We can not demote gigantic pages if runtime freeing
334079dfc695SMike Kravetz * is not supported, so skip this.
3341a01f4390SMike Kravetz * - If CMA allocation is possible, we can not demote
3342a01f4390SMike Kravetz * HUGETLB_PAGE_ORDER or smaller size pages.
334379dfc695SMike Kravetz */
334479dfc695SMike Kravetz if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
334579dfc695SMike Kravetz continue;
3346a01f4390SMike Kravetz if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
3347a01f4390SMike Kravetz continue;
334879dfc695SMike Kravetz for_each_hstate(h2) {
334979dfc695SMike Kravetz if (h2 == h)
335079dfc695SMike Kravetz continue;
335179dfc695SMike Kravetz if (h2->order < h->order &&
335279dfc695SMike Kravetz h2->order > h->demote_order)
335379dfc695SMike Kravetz h->demote_order = h2->order;
335479dfc695SMike Kravetz }
3355e5ff2159SAndi Kleen }
3356e5ff2159SAndi Kleen }
3357e5ff2159SAndi Kleen
report_hugepages(void)3358e5ff2159SAndi Kleen static void __init report_hugepages(void)
3359e5ff2159SAndi Kleen {
3360e5ff2159SAndi Kleen struct hstate *h;
3361e5ff2159SAndi Kleen
3362e5ff2159SAndi Kleen for_each_hstate(h) {
33634abd32dbSAndi Kleen char buf[32];
3364c6247f72SMatthew Wilcox
3365c6247f72SMatthew Wilcox string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
33666213834cSMuchun Song pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
3367c6247f72SMatthew Wilcox buf, h->free_huge_pages);
33686213834cSMuchun Song pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
33696213834cSMuchun Song hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
3370e5ff2159SAndi Kleen }
3371e5ff2159SAndi Kleen }
3372e5ff2159SAndi Kleen
33731da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM
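/*
 * When shrinking the pool, free huge pages residing in low memory first
 * (pages in highmem are skipped), since lowmem is the scarcer resource.
 */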
try_to_free_low(struct hstate * h,unsigned long count,nodemask_t * nodes_allowed)33746ae11b27SLee Schermerhorn static void try_to_free_low(struct hstate *h, unsigned long count,
33756ae11b27SLee Schermerhorn nodemask_t *nodes_allowed)
33761da177e4SLinus Torvalds {
33774415cc8dSChristoph Lameter int i;
33781121828aSMike Kravetz LIST_HEAD(page_list);
33794415cc8dSChristoph Lameter
33809487ca60SMike Kravetz lockdep_assert_held(&hugetlb_lock);
3381bae7f4aeSLuiz Capitulino if (hstate_is_gigantic(h))
3382aa888a74SAndi Kleen return;
3383aa888a74SAndi Kleen
33841121828aSMike Kravetz /*
33851121828aSMike Kravetz * Collect pages to be freed on a list, and free after dropping lock
33861121828aSMike Kravetz */
33876ae11b27SLee Schermerhorn for_each_node_mask(i, *nodes_allowed) {
338810c6ec49SMike Kravetz struct page *page, *next;
3389a5516438SAndi Kleen struct list_head *freel = &h->hugepage_freelists[i];
3390a5516438SAndi Kleen list_for_each_entry_safe(page, next, freel, lru) {
3391a5516438SAndi Kleen if (count >= h->nr_huge_pages)
33921121828aSMike Kravetz goto out;
33931da177e4SLinus Torvalds if (PageHighMem(page))
33941da177e4SLinus Torvalds continue;
3395cfd5082bSSidhartha Kumar remove_hugetlb_folio(h, page_folio(page), false);
33961121828aSMike Kravetz list_add(&page->lru, &page_list);
33971121828aSMike Kravetz }
33981121828aSMike Kravetz }
33991121828aSMike Kravetz
34001121828aSMike Kravetz out:
3401db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
340210c6ec49SMike Kravetz update_and_free_pages_bulk(h, &page_list);
3403db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
34041da177e4SLinus Torvalds }
34051da177e4SLinus Torvalds #else
try_to_free_low(struct hstate * h,unsigned long count,nodemask_t * nodes_allowed)34066ae11b27SLee Schermerhorn static inline void try_to_free_low(struct hstate *h, unsigned long count,
34076ae11b27SLee Schermerhorn nodemask_t *nodes_allowed)
34081da177e4SLinus Torvalds {
34091da177e4SLinus Torvalds }
34101da177e4SLinus Torvalds #endif
34111da177e4SLinus Torvalds
341220a0307cSWu Fengguang /*
341320a0307cSWu Fengguang * Increment or decrement surplus_huge_pages. Keep node-specific counters
341420a0307cSWu Fengguang * balanced by operating on them in a round-robin fashion.
341520a0307cSWu Fengguang * Returns 1 if an adjustment was made.
341620a0307cSWu Fengguang */
adjust_pool_surplus(struct hstate * h,nodemask_t * nodes_allowed,int delta)34176ae11b27SLee Schermerhorn static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
34186ae11b27SLee Schermerhorn int delta)
341920a0307cSWu Fengguang {
3420b2261026SJoonsoo Kim int nr_nodes, node;
342120a0307cSWu Fengguang
34229487ca60SMike Kravetz lockdep_assert_held(&hugetlb_lock);
342320a0307cSWu Fengguang VM_BUG_ON(delta != -1 && delta != 1);
342420a0307cSWu Fengguang
3425e8c5c824SLee Schermerhorn if (delta < 0) {
3426b2261026SJoonsoo Kim for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
3427b2261026SJoonsoo Kim if (h->surplus_huge_pages_node[node])
3428b2261026SJoonsoo Kim goto found;
3429b2261026SJoonsoo Kim }
3430b2261026SJoonsoo Kim } else {
3431b2261026SJoonsoo Kim for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3432b2261026SJoonsoo Kim if (h->surplus_huge_pages_node[node] <
3433b2261026SJoonsoo Kim h->nr_huge_pages_node[node])
3434b2261026SJoonsoo Kim goto found;
3435e8c5c824SLee Schermerhorn }
34369a76db09SLee Schermerhorn }
3437b2261026SJoonsoo Kim return 0;
343820a0307cSWu Fengguang
3439b2261026SJoonsoo Kim found:
344020a0307cSWu Fengguang h->surplus_huge_pages += delta;
3441b2261026SJoonsoo Kim h->surplus_huge_pages_node[node] += delta;
3442b2261026SJoonsoo Kim return 1;
344320a0307cSWu Fengguang }
344420a0307cSWu Fengguang
3445a5516438SAndi Kleen #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
set_max_huge_pages(struct hstate * h,unsigned long count,int nid,nodemask_t * nodes_allowed)3446fd875dcaSMike Kravetz static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
34476ae11b27SLee Schermerhorn nodemask_t *nodes_allowed)
34481da177e4SLinus Torvalds {
34497893d1d5SAdam Litke unsigned long min_count, ret;
345010c6ec49SMike Kravetz struct page *page;
345110c6ec49SMike Kravetz LIST_HEAD(page_list);
3452f60858f9SMike Kravetz NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3453f60858f9SMike Kravetz
3454f60858f9SMike Kravetz /*
3455f60858f9SMike Kravetz * Bit mask controlling how hard we retry per-node allocations.
3456f60858f9SMike Kravetz * If we can not allocate the bit mask, do not attempt to allocate
3457f60858f9SMike Kravetz * the requested huge pages.
3458f60858f9SMike Kravetz */
3459f60858f9SMike Kravetz if (node_alloc_noretry)
3460f60858f9SMike Kravetz nodes_clear(*node_alloc_noretry);
3461f60858f9SMike Kravetz else
3462f60858f9SMike Kravetz return -ENOMEM;
34631da177e4SLinus Torvalds
346429383967SMike Kravetz /*
346529383967SMike Kravetz * resize_lock mutex prevents concurrent adjustments to number of
346629383967SMike Kravetz * pages in hstate via the proc/sysfs interfaces.
346729383967SMike Kravetz */
346829383967SMike Kravetz mutex_lock(&h->resize_lock);
3469b65d4adbSMuchun Song flush_free_hpage_work(h);
3470db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
34714eb0716eSAlexandre Ghiti
34724eb0716eSAlexandre Ghiti /*
3473fd875dcaSMike Kravetz * Check for a node specific request.
3474fd875dcaSMike Kravetz * Changing node specific huge page count may require a corresponding
3475fd875dcaSMike Kravetz * change to the global count. In any case, the passed node mask
3476fd875dcaSMike Kravetz * (nodes_allowed) will restrict alloc/free to the specified node.
3477fd875dcaSMike Kravetz */
3478fd875dcaSMike Kravetz if (nid != NUMA_NO_NODE) {
3479fd875dcaSMike Kravetz unsigned long old_count = count;
3480fd875dcaSMike Kravetz
3481fd875dcaSMike Kravetz count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
3482fd875dcaSMike Kravetz /*
3483fd875dcaSMike Kravetz * User may have specified a large count value which caused the
3484fd875dcaSMike Kravetz * above calculation to overflow. In this case, they wanted
3485fd875dcaSMike Kravetz * to allocate as many huge pages as possible. Set count to
3486fd875dcaSMike Kravetz * largest possible value to align with their intention.
3487fd875dcaSMike Kravetz */
3488fd875dcaSMike Kravetz if (count < old_count)
3489fd875dcaSMike Kravetz count = ULONG_MAX;
3490fd875dcaSMike Kravetz }
3491fd875dcaSMike Kravetz
3492fd875dcaSMike Kravetz /*
34934eb0716eSAlexandre Ghiti * Gigantic page runtime allocation depends on the capability for large
34944eb0716eSAlexandre Ghiti * page range allocation.
34954eb0716eSAlexandre Ghiti * If the system does not provide this feature, return an error when
34964eb0716eSAlexandre Ghiti * the user tries to allocate gigantic pages but let the user free the
34974eb0716eSAlexandre Ghiti * boottime allocated gigantic pages.
34984eb0716eSAlexandre Ghiti */
34994eb0716eSAlexandre Ghiti if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
35004eb0716eSAlexandre Ghiti if (count > persistent_huge_pages(h)) {
3501db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
350229383967SMike Kravetz mutex_unlock(&h->resize_lock);
3503f60858f9SMike Kravetz NODEMASK_FREE(node_alloc_noretry);
35044eb0716eSAlexandre Ghiti return -EINVAL;
35054eb0716eSAlexandre Ghiti }
35064eb0716eSAlexandre Ghiti /* Fall through to decrease pool */
35074eb0716eSAlexandre Ghiti }
3508aa888a74SAndi Kleen
35097893d1d5SAdam Litke /*
35107893d1d5SAdam Litke * Increase the pool size
35117893d1d5SAdam Litke * First take pages out of surplus state. Then make up the
35127893d1d5SAdam Litke * remaining difference by allocating fresh huge pages.
3513d1c3fb1fSNishanth Aravamudan *
35143a740e8bSSidhartha Kumar * We might race with alloc_surplus_hugetlb_folio() here and be unable
3515d1c3fb1fSNishanth Aravamudan * to convert a surplus huge page to a normal huge page. That is
3516d1c3fb1fSNishanth Aravamudan * not critical, though, it just means the overall size of the
3517d1c3fb1fSNishanth Aravamudan * pool might be one hugepage larger than it needs to be, but
3518d1c3fb1fSNishanth Aravamudan * within all the constraints specified by the sysctls.
35197893d1d5SAdam Litke */
3520a5516438SAndi Kleen while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
35216ae11b27SLee Schermerhorn if (!adjust_pool_surplus(h, nodes_allowed, -1))
35227893d1d5SAdam Litke break;
35237893d1d5SAdam Litke }
35247893d1d5SAdam Litke
3525a5516438SAndi Kleen while (count > persistent_huge_pages(h)) {
35267893d1d5SAdam Litke /*
35277893d1d5SAdam Litke * If this allocation races such that we no longer need the
3528454a00c4SMatthew Wilcox (Oracle) * page, free_huge_folio will handle it by freeing the page
35297893d1d5SAdam Litke * and reducing the surplus.
35307893d1d5SAdam Litke */
3531db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
3532649920c6SJia He
3533649920c6SJia He /* yield cpu to avoid soft lockup */
3534649920c6SJia He cond_resched();
3535649920c6SJia He
3536f60858f9SMike Kravetz ret = alloc_pool_huge_page(h, nodes_allowed,
3537f60858f9SMike Kravetz node_alloc_noretry);
3538db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
35397893d1d5SAdam Litke if (!ret)
35407893d1d5SAdam Litke goto out;
35417893d1d5SAdam Litke
3542536240f2SMel Gorman /* Bail for signals. Probably ctrl-c from user */
3543536240f2SMel Gorman if (signal_pending(current))
3544536240f2SMel Gorman goto out;
35457893d1d5SAdam Litke }
35467893d1d5SAdam Litke
35477893d1d5SAdam Litke /*
35487893d1d5SAdam Litke * Decrease the pool size
35497893d1d5SAdam Litke * First return free pages to the buddy allocator (being careful
35507893d1d5SAdam Litke * to keep enough around to satisfy reservations). Then place
35517893d1d5SAdam Litke * pages into surplus state as needed so the pool will shrink
35527893d1d5SAdam Litke * to the desired size as pages become free.
3553d1c3fb1fSNishanth Aravamudan *
3554d1c3fb1fSNishanth Aravamudan * By placing pages into the surplus state independent of the
3555d1c3fb1fSNishanth Aravamudan * overcommit value, we are allowing the surplus pool size to
3556d1c3fb1fSNishanth Aravamudan * exceed overcommit. There are few sane options here. Since
35573a740e8bSSidhartha Kumar * alloc_surplus_hugetlb_folio() is checking the global counter,
3558d1c3fb1fSNishanth Aravamudan * though, we'll note that we're not allowed to exceed surplus
3559d1c3fb1fSNishanth Aravamudan * and won't grow the pool anywhere else. Not until one of the
3560d1c3fb1fSNishanth Aravamudan * sysctls are changed, or the surplus pages go out of use.
35617893d1d5SAdam Litke */
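/*
 * min_count is the floor the pool cannot shrink below: in-use pages
 * plus outstanding reservations.
 */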
3562a5516438SAndi Kleen min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
35636b0c880dSAdam Litke min_count = max(count, min_count);
35646ae11b27SLee Schermerhorn try_to_free_low(h, min_count, nodes_allowed);
356510c6ec49SMike Kravetz
356610c6ec49SMike Kravetz /*
356710c6ec49SMike Kravetz * Collect pages to be removed on list without dropping lock
356810c6ec49SMike Kravetz */
3569a5516438SAndi Kleen while (min_count < persistent_huge_pages(h)) {
357010c6ec49SMike Kravetz page = remove_pool_huge_page(h, nodes_allowed, 0);
357110c6ec49SMike Kravetz if (!page)
35721da177e4SLinus Torvalds break;
357310c6ec49SMike Kravetz
357410c6ec49SMike Kravetz list_add(&page->lru, &page_list);
35751da177e4SLinus Torvalds }
357610c6ec49SMike Kravetz /* free the pages after dropping lock */
3577db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
357810c6ec49SMike Kravetz update_and_free_pages_bulk(h, &page_list);
3579b65d4adbSMuchun Song flush_free_hpage_work(h);
3580db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
358110c6ec49SMike Kravetz
3582a5516438SAndi Kleen while (count < persistent_huge_pages(h)) {
35836ae11b27SLee Schermerhorn if (!adjust_pool_surplus(h, nodes_allowed, 1))
35847893d1d5SAdam Litke break;
35857893d1d5SAdam Litke }
35867893d1d5SAdam Litke out:
35874eb0716eSAlexandre Ghiti h->max_huge_pages = persistent_huge_pages(h);
3588db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
358929383967SMike Kravetz mutex_unlock(&h->resize_lock);
35904eb0716eSAlexandre Ghiti
3591f60858f9SMike Kravetz NODEMASK_FREE(node_alloc_noretry);
3592f60858f9SMike Kravetz
35934eb0716eSAlexandre Ghiti return 0;
35941da177e4SLinus Torvalds }
35951da177e4SLinus Torvalds
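/*
 * Demote a single free huge folio: restore its vmemmap pages, destroy the
 * compound page, and hand the resulting smaller pages to the demote-target
 * hstate as new huge pages.
 */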
demote_free_hugetlb_folio(struct hstate * h,struct folio * folio)3596bdd7be07SSidhartha Kumar static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
35978531fc6fSMike Kravetz {
3598bdd7be07SSidhartha Kumar int i, nid = folio_nid(folio);
35998531fc6fSMike Kravetz struct hstate *target_hstate;
360031731452SDoug Berger struct page *subpage;
3601bdd7be07SSidhartha Kumar struct folio *inner_folio;
36028531fc6fSMike Kravetz int rc = 0;
36038531fc6fSMike Kravetz
36048531fc6fSMike Kravetz target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
36058531fc6fSMike Kravetz
3606cfd5082bSSidhartha Kumar remove_hugetlb_folio_for_demote(h, folio, false);
36078531fc6fSMike Kravetz spin_unlock_irq(&hugetlb_lock);
36088531fc6fSMike Kravetz
3609bdd7be07SSidhartha Kumar rc = hugetlb_vmemmap_restore(h, &folio->page);
36108531fc6fSMike Kravetz if (rc) {
3611bdd7be07SSidhartha Kumar /* Allocation of vmemmap failed, we cannot demote the folio */
36128531fc6fSMike Kravetz spin_lock_irq(&hugetlb_lock);
3613bdd7be07SSidhartha Kumar folio_ref_unfreeze(folio, 1);
3614bdd7be07SSidhartha Kumar add_hugetlb_folio(h, folio, false);
36158531fc6fSMike Kravetz return rc;
36168531fc6fSMike Kravetz }
36178531fc6fSMike Kravetz
36188531fc6fSMike Kravetz /*
3619911565b8SSidhartha Kumar * Use destroy_compound_hugetlb_folio_for_demote for all huge page
3620bdd7be07SSidhartha Kumar * sizes as it will not ref count folios.
36218531fc6fSMike Kravetz */
3622911565b8SSidhartha Kumar destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
36238531fc6fSMike Kravetz
36248531fc6fSMike Kravetz /*
36258531fc6fSMike Kravetz * Taking target hstate mutex synchronizes with set_max_huge_pages.
36268531fc6fSMike Kravetz * Without the mutex, pages added to target hstate could be marked
36278531fc6fSMike Kravetz * as surplus.
36288531fc6fSMike Kravetz *
36298531fc6fSMike Kravetz * Note that we already hold h->resize_lock. To prevent deadlock,
36308531fc6fSMike Kravetz * use the convention of always taking larger size hstate mutex first.
36318531fc6fSMike Kravetz */
36328531fc6fSMike Kravetz mutex_lock(&target_hstate->resize_lock);
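/*
 * Split the demoted folio into target-order pieces, prep each piece as a
 * new hugetlb folio and release it into the target hstate's free pool.
 */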
36338531fc6fSMike Kravetz for (i = 0; i < pages_per_huge_page(h);
36348531fc6fSMike Kravetz i += pages_per_huge_page(target_hstate)) {
3635bdd7be07SSidhartha Kumar subpage = folio_page(folio, i);
3636bdd7be07SSidhartha Kumar inner_folio = page_folio(subpage);
36378531fc6fSMike Kravetz if (hstate_is_gigantic(target_hstate))
3638bdd7be07SSidhartha Kumar prep_compound_gigantic_folio_for_demote(inner_folio,
36398531fc6fSMike Kravetz target_hstate->order);
36408531fc6fSMike Kravetz else
364131731452SDoug Berger prep_compound_page(subpage, target_hstate->order);
3642bdd7be07SSidhartha Kumar folio_change_private(inner_folio, NULL);
3643bdd7be07SSidhartha Kumar prep_new_hugetlb_folio(target_hstate, inner_folio, nid);
3644454a00c4SMatthew Wilcox (Oracle) free_huge_folio(inner_folio);
36458531fc6fSMike Kravetz }
36468531fc6fSMike Kravetz mutex_unlock(&target_hstate->resize_lock);
36478531fc6fSMike Kravetz
36488531fc6fSMike Kravetz spin_lock_irq(&hugetlb_lock);
36498531fc6fSMike Kravetz
36508531fc6fSMike Kravetz /*
36518531fc6fSMike Kravetz * Not absolutely necessary, but for consistency update max_huge_pages
36528531fc6fSMike Kravetz * based on pool changes for the demoted page.
36538531fc6fSMike Kravetz */
36548531fc6fSMike Kravetz h->max_huge_pages--;
3655a43a83c7SMiaohe Lin target_hstate->max_huge_pages +=
3656a43a83c7SMiaohe Lin pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
36578531fc6fSMike Kravetz
36588531fc6fSMike Kravetz return rc;
36598531fc6fSMike Kravetz }
36608531fc6fSMike Kravetz
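/*
 * Demote one free huge page from the allowed nodes to h->demote_order sized
 * pages. Returns 0 on success, -EBUSY if no suitable free page was found,
 * or -EINVAL if the hstate has no demote order.
 */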
demote_pool_huge_page(struct hstate * h,nodemask_t * nodes_allowed)366179dfc695SMike Kravetz static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
366279dfc695SMike Kravetz __must_hold(&hugetlb_lock)
366379dfc695SMike Kravetz {
36648531fc6fSMike Kravetz int nr_nodes, node;
3665bdd7be07SSidhartha Kumar struct folio *folio;
366679dfc695SMike Kravetz
366779dfc695SMike Kravetz lockdep_assert_held(&hugetlb_lock);
366879dfc695SMike Kravetz
366979dfc695SMike Kravetz /* We should never get here if no demote order */
367079dfc695SMike Kravetz if (!h->demote_order) {
367179dfc695SMike Kravetz pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
367279dfc695SMike Kravetz return -EINVAL; /* internal error */
367379dfc695SMike Kravetz }
367479dfc695SMike Kravetz
36758531fc6fSMike Kravetz for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3676bdd7be07SSidhartha Kumar list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
3677bdd7be07SSidhartha Kumar if (folio_test_hwpoison(folio))
36785a317412SMike Kravetz continue;
3679bdd7be07SSidhartha Kumar return demote_free_hugetlb_folio(h, folio);
36808531fc6fSMike Kravetz }
36818531fc6fSMike Kravetz }
36828531fc6fSMike Kravetz
36835a317412SMike Kravetz /*
36845a317412SMike Kravetz * Only way to get here is if all pages on free lists are poisoned.
36855a317412SMike Kravetz * Return -EBUSY so that caller will not retry.
36865a317412SMike Kravetz */
36875a317412SMike Kravetz return -EBUSY;
368879dfc695SMike Kravetz }
368979dfc695SMike Kravetz
3690a3437870SNishanth Aravamudan #define HSTATE_ATTR_RO(_name) \
3691a3437870SNishanth Aravamudan static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3692a3437870SNishanth Aravamudan
369379dfc695SMike Kravetz #define HSTATE_ATTR_WO(_name) \
369479dfc695SMike Kravetz static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
369579dfc695SMike Kravetz
3696a3437870SNishanth Aravamudan #define HSTATE_ATTR(_name) \
369798bc26acSMiaohe Lin static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
3698a3437870SNishanth Aravamudan
3699a3437870SNishanth Aravamudan static struct kobject *hugepages_kobj;
3700a3437870SNishanth Aravamudan static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
3701a3437870SNishanth Aravamudan
37029a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
37039a305230SLee Schermerhorn
kobj_to_hstate(struct kobject * kobj,int * nidp)37049a305230SLee Schermerhorn static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
3705a3437870SNishanth Aravamudan {
3706a3437870SNishanth Aravamudan int i;
37079a305230SLee Schermerhorn
3708a3437870SNishanth Aravamudan for (i = 0; i < HUGE_MAX_HSTATE; i++)
37099a305230SLee Schermerhorn if (hstate_kobjs[i] == kobj) {
37109a305230SLee Schermerhorn if (nidp)
37119a305230SLee Schermerhorn *nidp = NUMA_NO_NODE;
3712a3437870SNishanth Aravamudan return &hstates[i];
37139a305230SLee Schermerhorn }
37149a305230SLee Schermerhorn
37159a305230SLee Schermerhorn return kobj_to_node_hstate(kobj, nidp);
3716a3437870SNishanth Aravamudan }
3717a3437870SNishanth Aravamudan
nr_hugepages_show_common(struct kobject * kobj,struct kobj_attribute * attr,char * buf)371806808b08SLee Schermerhorn static ssize_t nr_hugepages_show_common(struct kobject *kobj,
3719a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf)
3720a3437870SNishanth Aravamudan {
37219a305230SLee Schermerhorn struct hstate *h;
37229a305230SLee Schermerhorn unsigned long nr_huge_pages;
37239a305230SLee Schermerhorn int nid;
37249a305230SLee Schermerhorn
37259a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid);
37269a305230SLee Schermerhorn if (nid == NUMA_NO_NODE)
37279a305230SLee Schermerhorn nr_huge_pages = h->nr_huge_pages;
37289a305230SLee Schermerhorn else
37299a305230SLee Schermerhorn nr_huge_pages = h->nr_huge_pages_node[nid];
37309a305230SLee Schermerhorn
3731ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", nr_huge_pages);
3732a3437870SNishanth Aravamudan }
3733adbe8726SEric B Munson
__nr_hugepages_store_common(bool obey_mempolicy,struct hstate * h,int nid,unsigned long count,size_t len)3734238d3c13SDavid Rientjes static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
3735238d3c13SDavid Rientjes struct hstate *h, int nid,
3736238d3c13SDavid Rientjes unsigned long count, size_t len)
3737a3437870SNishanth Aravamudan {
3738a3437870SNishanth Aravamudan int err;
37392d0adf7eSOscar Salvador nodemask_t nodes_allowed, *n_mask;
3740a3437870SNishanth Aravamudan
37412d0adf7eSOscar Salvador if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
37422d0adf7eSOscar Salvador return -EINVAL;
3743adbe8726SEric B Munson
37449a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) {
37459a305230SLee Schermerhorn /*
37469a305230SLee Schermerhorn * global hstate attribute
37479a305230SLee Schermerhorn */
37489a305230SLee Schermerhorn if (!(obey_mempolicy &&
37492d0adf7eSOscar Salvador init_nodemask_of_mempolicy(&nodes_allowed)))
37502d0adf7eSOscar Salvador n_mask = &node_states[N_MEMORY];
37512d0adf7eSOscar Salvador else
37522d0adf7eSOscar Salvador n_mask = &nodes_allowed;
37532d0adf7eSOscar Salvador } else {
37549a305230SLee Schermerhorn /*
3755fd875dcaSMike Kravetz * Node specific request. count adjustment happens in
3756fd875dcaSMike Kravetz * set_max_huge_pages() after acquiring hugetlb_lock.
37579a305230SLee Schermerhorn */
37582d0adf7eSOscar Salvador init_nodemask_of_node(&nodes_allowed, nid);
37592d0adf7eSOscar Salvador n_mask = &nodes_allowed;
3760fd875dcaSMike Kravetz }
37619a305230SLee Schermerhorn
37622d0adf7eSOscar Salvador err = set_max_huge_pages(h, count, nid, n_mask);
376306808b08SLee Schermerhorn
37644eb0716eSAlexandre Ghiti return err ? err : len;
376506808b08SLee Schermerhorn }
376606808b08SLee Schermerhorn
nr_hugepages_store_common(bool obey_mempolicy,struct kobject * kobj,const char * buf,size_t len)3767238d3c13SDavid Rientjes static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
3768238d3c13SDavid Rientjes struct kobject *kobj, const char *buf,
3769238d3c13SDavid Rientjes size_t len)
3770238d3c13SDavid Rientjes {
3771238d3c13SDavid Rientjes struct hstate *h;
3772238d3c13SDavid Rientjes unsigned long count;
3773238d3c13SDavid Rientjes int nid;
3774238d3c13SDavid Rientjes int err;
3775238d3c13SDavid Rientjes
3776238d3c13SDavid Rientjes err = kstrtoul(buf, 10, &count);
3777238d3c13SDavid Rientjes if (err)
3778238d3c13SDavid Rientjes return err;
3779238d3c13SDavid Rientjes
3780238d3c13SDavid Rientjes h = kobj_to_hstate(kobj, &nid);
3781238d3c13SDavid Rientjes return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
3782238d3c13SDavid Rientjes }
3783238d3c13SDavid Rientjes
nr_hugepages_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)378406808b08SLee Schermerhorn static ssize_t nr_hugepages_show(struct kobject *kobj,
378506808b08SLee Schermerhorn struct kobj_attribute *attr, char *buf)
378606808b08SLee Schermerhorn {
378706808b08SLee Schermerhorn return nr_hugepages_show_common(kobj, attr, buf);
378806808b08SLee Schermerhorn }
378906808b08SLee Schermerhorn
nr_hugepages_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t len)379006808b08SLee Schermerhorn static ssize_t nr_hugepages_store(struct kobject *kobj,
379106808b08SLee Schermerhorn struct kobj_attribute *attr, const char *buf, size_t len)
379206808b08SLee Schermerhorn {
3793238d3c13SDavid Rientjes return nr_hugepages_store_common(false, kobj, buf, len);
3794a3437870SNishanth Aravamudan }
3795a3437870SNishanth Aravamudan HSTATE_ATTR(nr_hugepages);
3796a3437870SNishanth Aravamudan
379706808b08SLee Schermerhorn #ifdef CONFIG_NUMA
379806808b08SLee Schermerhorn
379906808b08SLee Schermerhorn /*
380006808b08SLee Schermerhorn * hstate attribute for optionally mempolicy-based constraint on persistent
380106808b08SLee Schermerhorn * huge page alloc/free.
380206808b08SLee Schermerhorn */
nr_hugepages_mempolicy_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)380306808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
3804ae7a927dSJoe Perches struct kobj_attribute *attr,
3805ae7a927dSJoe Perches char *buf)
380606808b08SLee Schermerhorn {
380706808b08SLee Schermerhorn return nr_hugepages_show_common(kobj, attr, buf);
380806808b08SLee Schermerhorn }
380906808b08SLee Schermerhorn
nr_hugepages_mempolicy_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t len)381006808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
381106808b08SLee Schermerhorn struct kobj_attribute *attr, const char *buf, size_t len)
381206808b08SLee Schermerhorn {
3813238d3c13SDavid Rientjes return nr_hugepages_store_common(true, kobj, buf, len);
381406808b08SLee Schermerhorn }
381506808b08SLee Schermerhorn HSTATE_ATTR(nr_hugepages_mempolicy);
381606808b08SLee Schermerhorn #endif
381706808b08SLee Schermerhorn
381806808b08SLee Schermerhorn
nr_overcommit_hugepages_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)3819a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
3820a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf)
3821a3437870SNishanth Aravamudan {
38229a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL);
3823ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
3824a3437870SNishanth Aravamudan }
3825adbe8726SEric B Munson
nr_overcommit_hugepages_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)3826a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
3827a3437870SNishanth Aravamudan struct kobj_attribute *attr, const char *buf, size_t count)
3828a3437870SNishanth Aravamudan {
3829a3437870SNishanth Aravamudan int err;
3830a3437870SNishanth Aravamudan unsigned long input;
38319a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL);
3832a3437870SNishanth Aravamudan
3833bae7f4aeSLuiz Capitulino if (hstate_is_gigantic(h))
3834adbe8726SEric B Munson return -EINVAL;
3835adbe8726SEric B Munson
38363dbb95f7SJingoo Han err = kstrtoul(buf, 10, &input);
3837a3437870SNishanth Aravamudan if (err)
383873ae31e5SEric B Munson return err;
3839a3437870SNishanth Aravamudan
3840db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
3841a3437870SNishanth Aravamudan h->nr_overcommit_huge_pages = input;
3842db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
3843a3437870SNishanth Aravamudan
3844a3437870SNishanth Aravamudan return count;
3845a3437870SNishanth Aravamudan }
3846a3437870SNishanth Aravamudan HSTATE_ATTR(nr_overcommit_hugepages);
3847a3437870SNishanth Aravamudan
free_hugepages_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)3848a3437870SNishanth Aravamudan static ssize_t free_hugepages_show(struct kobject *kobj,
3849a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf)
3850a3437870SNishanth Aravamudan {
38519a305230SLee Schermerhorn struct hstate *h;
38529a305230SLee Schermerhorn unsigned long free_huge_pages;
38539a305230SLee Schermerhorn int nid;
38549a305230SLee Schermerhorn
38559a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid);
38569a305230SLee Schermerhorn if (nid == NUMA_NO_NODE)
38579a305230SLee Schermerhorn free_huge_pages = h->free_huge_pages;
38589a305230SLee Schermerhorn else
38599a305230SLee Schermerhorn free_huge_pages = h->free_huge_pages_node[nid];
38609a305230SLee Schermerhorn
3861ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", free_huge_pages);
3862a3437870SNishanth Aravamudan }
3863a3437870SNishanth Aravamudan HSTATE_ATTR_RO(free_hugepages);
3864a3437870SNishanth Aravamudan
resv_hugepages_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)3865a3437870SNishanth Aravamudan static ssize_t resv_hugepages_show(struct kobject *kobj,
3866a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf)
3867a3437870SNishanth Aravamudan {
38689a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL);
3869ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
3870a3437870SNishanth Aravamudan }
3871a3437870SNishanth Aravamudan HSTATE_ATTR_RO(resv_hugepages);
3872a3437870SNishanth Aravamudan
surplus_hugepages_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)3873a3437870SNishanth Aravamudan static ssize_t surplus_hugepages_show(struct kobject *kobj,
3874a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf)
3875a3437870SNishanth Aravamudan {
38769a305230SLee Schermerhorn struct hstate *h;
38779a305230SLee Schermerhorn unsigned long surplus_huge_pages;
38789a305230SLee Schermerhorn int nid;
38799a305230SLee Schermerhorn
38809a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid);
38819a305230SLee Schermerhorn if (nid == NUMA_NO_NODE)
38829a305230SLee Schermerhorn surplus_huge_pages = h->surplus_huge_pages;
38839a305230SLee Schermerhorn else
38849a305230SLee Schermerhorn surplus_huge_pages = h->surplus_huge_pages_node[nid];
38859a305230SLee Schermerhorn
3886ae7a927dSJoe Perches return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
3887a3437870SNishanth Aravamudan }
3888a3437870SNishanth Aravamudan HSTATE_ATTR_RO(surplus_hugepages);
3889a3437870SNishanth Aravamudan
demote_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t len)389079dfc695SMike Kravetz static ssize_t demote_store(struct kobject *kobj,
389179dfc695SMike Kravetz struct kobj_attribute *attr, const char *buf, size_t len)
389279dfc695SMike Kravetz {
389379dfc695SMike Kravetz unsigned long nr_demote;
389479dfc695SMike Kravetz unsigned long nr_available;
389579dfc695SMike Kravetz nodemask_t nodes_allowed, *n_mask;
389679dfc695SMike Kravetz struct hstate *h;
38978eeda55fSLi zeming int err;
389879dfc695SMike Kravetz int nid;
389979dfc695SMike Kravetz
390079dfc695SMike Kravetz err = kstrtoul(buf, 10, &nr_demote);
390179dfc695SMike Kravetz if (err)
390279dfc695SMike Kravetz return err;
390379dfc695SMike Kravetz h = kobj_to_hstate(kobj, &nid);
390479dfc695SMike Kravetz
390579dfc695SMike Kravetz if (nid != NUMA_NO_NODE) {
390679dfc695SMike Kravetz init_nodemask_of_node(&nodes_allowed, nid);
390779dfc695SMike Kravetz n_mask = &nodes_allowed;
390879dfc695SMike Kravetz } else {
390979dfc695SMike Kravetz n_mask = &node_states[N_MEMORY];
391079dfc695SMike Kravetz }
391179dfc695SMike Kravetz
391279dfc695SMike Kravetz /* Synchronize with other sysfs operations modifying huge pages */
391379dfc695SMike Kravetz mutex_lock(&h->resize_lock);
391479dfc695SMike Kravetz spin_lock_irq(&hugetlb_lock);
391579dfc695SMike Kravetz
391679dfc695SMike Kravetz while (nr_demote) {
391779dfc695SMike Kravetz /*
391879dfc695SMike Kravetz * Check for available pages to demote each time through the
391979dfc695SMike Kravetz * loop as demote_pool_huge_page will drop hugetlb_lock.
392079dfc695SMike Kravetz */
392179dfc695SMike Kravetz if (nid != NUMA_NO_NODE)
392279dfc695SMike Kravetz nr_available = h->free_huge_pages_node[nid];
392379dfc695SMike Kravetz else
392479dfc695SMike Kravetz nr_available = h->free_huge_pages;
392579dfc695SMike Kravetz nr_available -= h->resv_huge_pages;
392679dfc695SMike Kravetz if (!nr_available)
392779dfc695SMike Kravetz break;
392879dfc695SMike Kravetz
392979dfc695SMike Kravetz err = demote_pool_huge_page(h, n_mask);
393079dfc695SMike Kravetz if (err)
393179dfc695SMike Kravetz break;
393279dfc695SMike Kravetz
393379dfc695SMike Kravetz nr_demote--;
393479dfc695SMike Kravetz }
393579dfc695SMike Kravetz
393679dfc695SMike Kravetz spin_unlock_irq(&hugetlb_lock);
393779dfc695SMike Kravetz mutex_unlock(&h->resize_lock);
393879dfc695SMike Kravetz
393979dfc695SMike Kravetz if (err)
394079dfc695SMike Kravetz return err;
394179dfc695SMike Kravetz return len;
394279dfc695SMike Kravetz }
394379dfc695SMike Kravetz HSTATE_ATTR_WO(demote);
394479dfc695SMike Kravetz
demote_size_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)394579dfc695SMike Kravetz static ssize_t demote_size_show(struct kobject *kobj,
394679dfc695SMike Kravetz struct kobj_attribute *attr, char *buf)
394779dfc695SMike Kravetz {
394812658abfSMiaohe Lin struct hstate *h = kobj_to_hstate(kobj, NULL);
394979dfc695SMike Kravetz unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
395079dfc695SMike Kravetz
395179dfc695SMike Kravetz return sysfs_emit(buf, "%lukB\n", demote_size);
395279dfc695SMike Kravetz }
395379dfc695SMike Kravetz
demote_size_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)395479dfc695SMike Kravetz static ssize_t demote_size_store(struct kobject *kobj,
395579dfc695SMike Kravetz struct kobj_attribute *attr,
395679dfc695SMike Kravetz const char *buf, size_t count)
395779dfc695SMike Kravetz {
395879dfc695SMike Kravetz struct hstate *h, *demote_hstate;
395979dfc695SMike Kravetz unsigned long demote_size;
396079dfc695SMike Kravetz unsigned int demote_order;
396179dfc695SMike Kravetz
396279dfc695SMike Kravetz demote_size = (unsigned long)memparse(buf, NULL);
396379dfc695SMike Kravetz
396479dfc695SMike Kravetz demote_hstate = size_to_hstate(demote_size);
396579dfc695SMike Kravetz if (!demote_hstate)
396679dfc695SMike Kravetz return -EINVAL;
396779dfc695SMike Kravetz demote_order = demote_hstate->order;
3968a01f4390SMike Kravetz if (demote_order < HUGETLB_PAGE_ORDER)
3969a01f4390SMike Kravetz return -EINVAL;
397079dfc695SMike Kravetz
397179dfc695SMike Kravetz /* demote order must be smaller than hstate order */
397212658abfSMiaohe Lin h = kobj_to_hstate(kobj, NULL);
397379dfc695SMike Kravetz if (demote_order >= h->order)
397479dfc695SMike Kravetz return -EINVAL;
397579dfc695SMike Kravetz
397679dfc695SMike Kravetz /* resize_lock synchronizes access to demote size and writes */
397779dfc695SMike Kravetz mutex_lock(&h->resize_lock);
397879dfc695SMike Kravetz h->demote_order = demote_order;
397979dfc695SMike Kravetz mutex_unlock(&h->resize_lock);
398079dfc695SMike Kravetz
398179dfc695SMike Kravetz return count;
398279dfc695SMike Kravetz }
398379dfc695SMike Kravetz HSTATE_ATTR(demote_size);
398479dfc695SMike Kravetz
3985a3437870SNishanth Aravamudan static struct attribute *hstate_attrs[] = {
3986a3437870SNishanth Aravamudan &nr_hugepages_attr.attr,
3987a3437870SNishanth Aravamudan &nr_overcommit_hugepages_attr.attr,
3988a3437870SNishanth Aravamudan &free_hugepages_attr.attr,
3989a3437870SNishanth Aravamudan &resv_hugepages_attr.attr,
3990a3437870SNishanth Aravamudan &surplus_hugepages_attr.attr,
399106808b08SLee Schermerhorn #ifdef CONFIG_NUMA
399206808b08SLee Schermerhorn &nr_hugepages_mempolicy_attr.attr,
399306808b08SLee Schermerhorn #endif
3994a3437870SNishanth Aravamudan NULL,
3995a3437870SNishanth Aravamudan };
3996a3437870SNishanth Aravamudan
399767e5ed96SArvind Yadav static const struct attribute_group hstate_attr_group = {
3998a3437870SNishanth Aravamudan .attrs = hstate_attrs,
3999a3437870SNishanth Aravamudan };
4000a3437870SNishanth Aravamudan
400179dfc695SMike Kravetz static struct attribute *hstate_demote_attrs[] = {
400279dfc695SMike Kravetz &demote_size_attr.attr,
400379dfc695SMike Kravetz &demote_attr.attr,
400479dfc695SMike Kravetz NULL,
400579dfc695SMike Kravetz };
400679dfc695SMike Kravetz
400779dfc695SMike Kravetz static const struct attribute_group hstate_demote_attr_group = {
400879dfc695SMike Kravetz .attrs = hstate_demote_attrs,
400979dfc695SMike Kravetz };
401079dfc695SMike Kravetz
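/*
 * Create the sysfs kobject and attribute group for one hstate under @parent,
 * including the demote interfaces when the hstate has a demote order.
 */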
hugetlb_sysfs_add_hstate(struct hstate * h,struct kobject * parent,struct kobject ** hstate_kobjs,const struct attribute_group * hstate_attr_group)4011094e9539SJeff Mahoney static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
40129a305230SLee Schermerhorn struct kobject **hstate_kobjs,
401367e5ed96SArvind Yadav const struct attribute_group *hstate_attr_group)
4014a3437870SNishanth Aravamudan {
4015a3437870SNishanth Aravamudan int retval;
4016972dc4deSAneesh Kumar K.V int hi = hstate_index(h);
4017a3437870SNishanth Aravamudan
40189a305230SLee Schermerhorn hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
40199a305230SLee Schermerhorn if (!hstate_kobjs[hi])
4020a3437870SNishanth Aravamudan return -ENOMEM;
4021a3437870SNishanth Aravamudan
40229a305230SLee Schermerhorn retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
4023cc2205a6SMiaohe Lin if (retval) {
40249a305230SLee Schermerhorn kobject_put(hstate_kobjs[hi]);
4025cc2205a6SMiaohe Lin hstate_kobjs[hi] = NULL;
40263a6bdda0SMiaohe Lin return retval;
4027cc2205a6SMiaohe Lin }
4028a3437870SNishanth Aravamudan
402979dfc695SMike Kravetz if (h->demote_order) {
403001088a60SMiaohe Lin retval = sysfs_create_group(hstate_kobjs[hi],
403101088a60SMiaohe Lin &hstate_demote_attr_group);
403201088a60SMiaohe Lin if (retval) {
403379dfc695SMike Kravetz pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
403401088a60SMiaohe Lin sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group);
403501088a60SMiaohe Lin kobject_put(hstate_kobjs[hi]);
403601088a60SMiaohe Lin hstate_kobjs[hi] = NULL;
403701088a60SMiaohe Lin return retval;
403801088a60SMiaohe Lin }
403979dfc695SMike Kravetz }
404079dfc695SMike Kravetz
404101088a60SMiaohe Lin return 0;
4042a3437870SNishanth Aravamudan }
4043a3437870SNishanth Aravamudan
40449a305230SLee Schermerhorn #ifdef CONFIG_NUMA
4045a4a00b45SMuchun Song static bool hugetlb_sysfs_initialized __ro_after_init;
40469a305230SLee Schermerhorn
40479a305230SLee Schermerhorn /*
40489a305230SLee Schermerhorn * node_hstate/s - associate per node hstate attributes, via their kobjects,
404910fbcf4cSKay Sievers * with node devices in node_devices[] using a parallel array. The array
405010fbcf4cSKay Sievers * index of a node device or _hstate == node id.
405110fbcf4cSKay Sievers * This is here to avoid any static dependency of the node device driver, in
40529a305230SLee Schermerhorn * the base kernel, on the hugetlb module.
40539a305230SLee Schermerhorn */
40549a305230SLee Schermerhorn struct node_hstate {
40559a305230SLee Schermerhorn struct kobject *hugepages_kobj;
40569a305230SLee Schermerhorn struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
40579a305230SLee Schermerhorn };
4058b4e289a6SAlexander Kuleshov static struct node_hstate node_hstates[MAX_NUMNODES];
40599a305230SLee Schermerhorn
40609a305230SLee Schermerhorn /*
406110fbcf4cSKay Sievers * A subset of global hstate attributes for node devices
40629a305230SLee Schermerhorn */
40639a305230SLee Schermerhorn static struct attribute *per_node_hstate_attrs[] = {
40649a305230SLee Schermerhorn &nr_hugepages_attr.attr,
40659a305230SLee Schermerhorn &free_hugepages_attr.attr,
40669a305230SLee Schermerhorn &surplus_hugepages_attr.attr,
40679a305230SLee Schermerhorn NULL,
40689a305230SLee Schermerhorn };
40699a305230SLee Schermerhorn
407067e5ed96SArvind Yadav static const struct attribute_group per_node_hstate_attr_group = {
40719a305230SLee Schermerhorn .attrs = per_node_hstate_attrs,
40729a305230SLee Schermerhorn };
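/*
 * Illustrative result of the per node registration below (a sketch, assuming
 * a 2048kB hstate on node 0; the attribute names come from
 * per_node_hstate_attrs above):
 *
 *   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages
 *   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/surplus_hugepages
 */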
40739a305230SLee Schermerhorn
40749a305230SLee Schermerhorn /*
407510fbcf4cSKay Sievers * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
40769a305230SLee Schermerhorn * Returns node id via non-NULL nidp.
40779a305230SLee Schermerhorn */
40789a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
40799a305230SLee Schermerhorn {
40809a305230SLee Schermerhorn int nid;
40819a305230SLee Schermerhorn
40829a305230SLee Schermerhorn for (nid = 0; nid < nr_node_ids; nid++) {
40839a305230SLee Schermerhorn struct node_hstate *nhs = &node_hstates[nid];
40849a305230SLee Schermerhorn int i;
40859a305230SLee Schermerhorn for (i = 0; i < HUGE_MAX_HSTATE; i++)
40869a305230SLee Schermerhorn if (nhs->hstate_kobjs[i] == kobj) {
40879a305230SLee Schermerhorn if (nidp)
40889a305230SLee Schermerhorn *nidp = nid;
40899a305230SLee Schermerhorn return &hstates[i];
40909a305230SLee Schermerhorn }
40919a305230SLee Schermerhorn }
40929a305230SLee Schermerhorn
40939a305230SLee Schermerhorn BUG();
40949a305230SLee Schermerhorn return NULL;
40959a305230SLee Schermerhorn }
40969a305230SLee Schermerhorn
40979a305230SLee Schermerhorn /*
409810fbcf4cSKay Sievers * Unregister hstate attributes from a single node device.
40999a305230SLee Schermerhorn * No-op if no hstate attributes attached.
41009a305230SLee Schermerhorn */
4101a4a00b45SMuchun Song void hugetlb_unregister_node(struct node *node)
41029a305230SLee Schermerhorn {
41039a305230SLee Schermerhorn struct hstate *h;
410410fbcf4cSKay Sievers struct node_hstate *nhs = &node_hstates[node->dev.id];
41059a305230SLee Schermerhorn
41069a305230SLee Schermerhorn if (!nhs->hugepages_kobj)
41079b5e5d0fSLee Schermerhorn return; /* no hstate attributes */
41089a305230SLee Schermerhorn
4109972dc4deSAneesh Kumar K.V for_each_hstate(h) {
4110972dc4deSAneesh Kumar K.V int idx = hstate_index(h);
411101088a60SMiaohe Lin struct kobject *hstate_kobj = nhs->hstate_kobjs[idx];
411201088a60SMiaohe Lin
411301088a60SMiaohe Lin if (!hstate_kobj)
411401088a60SMiaohe Lin continue;
411501088a60SMiaohe Lin if (h->demote_order)
411601088a60SMiaohe Lin sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group);
411701088a60SMiaohe Lin sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group);
411801088a60SMiaohe Lin kobject_put(hstate_kobj);
4119972dc4deSAneesh Kumar K.V nhs->hstate_kobjs[idx] = NULL;
4120972dc4deSAneesh Kumar K.V }
41219a305230SLee Schermerhorn
41229a305230SLee Schermerhorn kobject_put(nhs->hugepages_kobj);
41239a305230SLee Schermerhorn nhs->hugepages_kobj = NULL;
41249a305230SLee Schermerhorn }
41259a305230SLee Schermerhorn
41269a305230SLee Schermerhorn
41279a305230SLee Schermerhorn /*
412810fbcf4cSKay Sievers * Register hstate attributes for a single node device.
41299a305230SLee Schermerhorn * No-op if attributes already registered.
41309a305230SLee Schermerhorn */
4131a4a00b45SMuchun Song void hugetlb_register_node(struct node *node)
41329a305230SLee Schermerhorn {
41339a305230SLee Schermerhorn struct hstate *h;
413410fbcf4cSKay Sievers struct node_hstate *nhs = &node_hstates[node->dev.id];
41359a305230SLee Schermerhorn int err;
41369a305230SLee Schermerhorn
4137a4a00b45SMuchun Song if (!hugetlb_sysfs_initialized)
4138a4a00b45SMuchun Song return;
4139a4a00b45SMuchun Song
41409a305230SLee Schermerhorn if (nhs->hugepages_kobj)
41419a305230SLee Schermerhorn return; /* already allocated */
41429a305230SLee Schermerhorn
41439a305230SLee Schermerhorn nhs->hugepages_kobj = kobject_create_and_add("hugepages",
414410fbcf4cSKay Sievers &node->dev.kobj);
41459a305230SLee Schermerhorn if (!nhs->hugepages_kobj)
41469a305230SLee Schermerhorn return;
41479a305230SLee Schermerhorn
41489a305230SLee Schermerhorn for_each_hstate(h) {
41499a305230SLee Schermerhorn err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
41509a305230SLee Schermerhorn nhs->hstate_kobjs,
41519a305230SLee Schermerhorn &per_node_hstate_attr_group);
41529a305230SLee Schermerhorn if (err) {
4153282f4214SMike Kravetz pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
415410fbcf4cSKay Sievers h->name, node->dev.id);
41559a305230SLee Schermerhorn hugetlb_unregister_node(node);
41569a305230SLee Schermerhorn break;
41579a305230SLee Schermerhorn }
41589a305230SLee Schermerhorn }
41599a305230SLee Schermerhorn }
41609a305230SLee Schermerhorn
41619a305230SLee Schermerhorn /*
41629b5e5d0fSLee Schermerhorn * hugetlb init time: register hstate attributes for all registered node
416310fbcf4cSKay Sievers * devices of nodes that have memory. All on-line nodes should have
416410fbcf4cSKay Sievers * registered their associated device by this time.
41659a305230SLee Schermerhorn */
41667d9ca000SLuiz Capitulino static void __init hugetlb_register_all_nodes(void)
41679a305230SLee Schermerhorn {
41689a305230SLee Schermerhorn int nid;
41699a305230SLee Schermerhorn
4170a4a00b45SMuchun Song for_each_online_node(nid)
4171b958d4d0SMuchun Song hugetlb_register_node(node_devices[nid]);
41729a305230SLee Schermerhorn }
41739a305230SLee Schermerhorn #else /* !CONFIG_NUMA */
41749a305230SLee Schermerhorn
41759a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
41769a305230SLee Schermerhorn {
41779a305230SLee Schermerhorn BUG();
41789a305230SLee Schermerhorn if (nidp)
41799a305230SLee Schermerhorn *nidp = -1;
41809a305230SLee Schermerhorn return NULL;
41819a305230SLee Schermerhorn }
41829a305230SLee Schermerhorn
41839a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) { }
41849a305230SLee Schermerhorn
41859a305230SLee Schermerhorn #endif
41869a305230SLee Schermerhorn
4187263b8998SMiaohe Lin #ifdef CONFIG_CMA
4188263b8998SMiaohe Lin static void __init hugetlb_cma_check(void);
4189263b8998SMiaohe Lin #else
4190263b8998SMiaohe Lin static inline __init void hugetlb_cma_check(void)
4191263b8998SMiaohe Lin {
4192263b8998SMiaohe Lin }
4193263b8998SMiaohe Lin #endif
4194263b8998SMiaohe Lin
4195a4a00b45SMuchun Song static void __init hugetlb_sysfs_init(void)
4196a4a00b45SMuchun Song {
4197a4a00b45SMuchun Song struct hstate *h;
4198a4a00b45SMuchun Song int err;
4199a4a00b45SMuchun Song
4200a4a00b45SMuchun Song hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
4201a4a00b45SMuchun Song if (!hugepages_kobj)
4202a4a00b45SMuchun Song return;
4203a4a00b45SMuchun Song
4204a4a00b45SMuchun Song for_each_hstate(h) {
4205a4a00b45SMuchun Song err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
4206a4a00b45SMuchun Song hstate_kobjs, &hstate_attr_group);
4207a4a00b45SMuchun Song if (err)
4208a4a00b45SMuchun Song pr_err("HugeTLB: Unable to add hstate %s", h->name);
4209a4a00b45SMuchun Song }
4210a4a00b45SMuchun Song
4211a4a00b45SMuchun Song #ifdef CONFIG_NUMA
4212a4a00b45SMuchun Song hugetlb_sysfs_initialized = true;
4213a4a00b45SMuchun Song #endif
4214a4a00b45SMuchun Song hugetlb_register_all_nodes();
4215a4a00b45SMuchun Song }
4216a4a00b45SMuchun Song
4217962de548SKefeng Wang #ifdef CONFIG_SYSCTL
4218962de548SKefeng Wang static void hugetlb_sysctl_init(void);
4219962de548SKefeng Wang #else
4220962de548SKefeng Wang static inline void hugetlb_sysctl_init(void) { }
4221962de548SKefeng Wang #endif
4222962de548SKefeng Wang
4223a3437870SNishanth Aravamudan static int __init hugetlb_init(void)
4224a3437870SNishanth Aravamudan {
42258382d914SDavidlohr Bueso int i;
42268382d914SDavidlohr Bueso
4227d6995da3SMike Kravetz BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4228d6995da3SMike Kravetz __NR_HPAGEFLAGS);
4229d6995da3SMike Kravetz
4230c2833a5bSMike Kravetz if (!hugepages_supported()) {
4231c2833a5bSMike Kravetz if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4232c2833a5bSMike Kravetz pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
42330ef89d25SBenjamin Herrenschmidt return 0;
4234d715cf80SLiam R. Howlett }
4235d715cf80SLiam R. Howlett
4236282f4214SMike Kravetz /*
4237282f4214SMike Kravetz * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some
4238282f4214SMike Kravetz * architectures depend on setup being done here.
4239282f4214SMike Kravetz */
4240a3437870SNishanth Aravamudan hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4241282f4214SMike Kravetz if (!parsed_default_hugepagesz) {
4242282f4214SMike Kravetz /*
4243282f4214SMike Kravetz * If we did not parse a default huge page size, set
4244282f4214SMike Kravetz * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4245282f4214SMike Kravetz * number of huge pages for this default size was implicitly
4246282f4214SMike Kravetz * specified, set that here as well.
4247282f4214SMike Kravetz * Note that the implicit setting will overwrite an explicit
4248282f4214SMike Kravetz * setting. A warning will be printed in this case.
4249282f4214SMike Kravetz */
4250282f4214SMike Kravetz default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4251f8b74815SVaishali Thakkar if (default_hstate_max_huge_pages) {
4252282f4214SMike Kravetz if (default_hstate.max_huge_pages) {
4253282f4214SMike Kravetz char buf[32];
4254282f4214SMike Kravetz
4255282f4214SMike Kravetz string_get_size(huge_page_size(&default_hstate),
4256282f4214SMike Kravetz 1, STRING_UNITS_2, buf, 32);
4257282f4214SMike Kravetz pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4258282f4214SMike Kravetz default_hstate.max_huge_pages, buf);
4259282f4214SMike Kravetz pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4260282f4214SMike Kravetz default_hstate_max_huge_pages);
4261282f4214SMike Kravetz }
4262282f4214SMike Kravetz default_hstate.max_huge_pages =
4263282f4214SMike Kravetz default_hstate_max_huge_pages;
4264b5389086SZhenguo Yao
42650a7a0f6fSPeng Liu for_each_online_node(i)
4266b5389086SZhenguo Yao default_hstate.max_huge_pages_node[i] =
4267b5389086SZhenguo Yao default_hugepages_in_node[i];
4268282f4214SMike Kravetz }
4269f8b74815SVaishali Thakkar }
4270a3437870SNishanth Aravamudan
4271cf11e85fSRoman Gushchin hugetlb_cma_check();
4272a3437870SNishanth Aravamudan hugetlb_init_hstates();
4273aa888a74SAndi Kleen gather_bootmem_prealloc();
4274a3437870SNishanth Aravamudan report_hugepages();
4275a3437870SNishanth Aravamudan
4276a3437870SNishanth Aravamudan hugetlb_sysfs_init();
42777179e7bfSJianguo Wu hugetlb_cgroup_file_init();
4278962de548SKefeng Wang hugetlb_sysctl_init();
42799a305230SLee Schermerhorn
42808382d914SDavidlohr Bueso #ifdef CONFIG_SMP
42818382d914SDavidlohr Bueso num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
42828382d914SDavidlohr Bueso #else
42838382d914SDavidlohr Bueso num_fault_mutexes = 1;
42848382d914SDavidlohr Bueso #endif
4285c672c7f2SMike Kravetz hugetlb_fault_mutex_table =
42866da2ec56SKees Cook kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
42876da2ec56SKees Cook GFP_KERNEL);
4288c672c7f2SMike Kravetz BUG_ON(!hugetlb_fault_mutex_table);
42898382d914SDavidlohr Bueso
42908382d914SDavidlohr Bueso for (i = 0; i < num_fault_mutexes; i++)
4291c672c7f2SMike Kravetz mutex_init(&hugetlb_fault_mutex_table[i]);
4292a3437870SNishanth Aravamudan return 0;
4293a3437870SNishanth Aravamudan }
42943e89e1c5SPaul Gortmaker subsys_initcall(hugetlb_init);
4295a3437870SNishanth Aravamudan
4296ae94da89SMike Kravetz /* Overwritten by architectures with more huge page sizes */
4297ae94da89SMike Kravetz bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
42989fee021dSVaishali Thakkar {
4299ae94da89SMike Kravetz return size == HPAGE_SIZE;
43009fee021dSVaishali Thakkar }
43019fee021dSVaishali Thakkar
4302d00181b9SKirill A. Shutemov void __init hugetlb_add_hstate(unsigned int order)
4303a3437870SNishanth Aravamudan {
4304a3437870SNishanth Aravamudan struct hstate *h;
43058faa8b07SAndi Kleen unsigned long i;
43068faa8b07SAndi Kleen
4307a3437870SNishanth Aravamudan if (size_to_hstate(PAGE_SIZE << order)) {
4308a3437870SNishanth Aravamudan return;
4309a3437870SNishanth Aravamudan }
431047d38344SAneesh Kumar K.V BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4311a3437870SNishanth Aravamudan BUG_ON(order == 0);
431247d38344SAneesh Kumar K.V h = &hstates[hugetlb_max_hstate++];
431329383967SMike Kravetz mutex_init(&h->resize_lock);
4314a3437870SNishanth Aravamudan h->order = order;
4315aca78307SMiaohe Lin h->mask = ~(huge_page_size(h) - 1);
43168faa8b07SAndi Kleen for (i = 0; i < MAX_NUMNODES; ++i)
43178faa8b07SAndi Kleen INIT_LIST_HEAD(&h->hugepage_freelists[i]);
43180edaecfaSAneesh Kumar K.V INIT_LIST_HEAD(&h->hugepage_activelist);
431954f18d35SAndrew Morton h->next_nid_to_alloc = first_memory_node;
432054f18d35SAndrew Morton h->next_nid_to_free = first_memory_node;
4321a3437870SNishanth Aravamudan snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4322c2c3a60aSMiaohe Lin huge_page_size(h)/SZ_1K);
43238faa8b07SAndi Kleen
4324a3437870SNishanth Aravamudan parsed_hstate = h;
4325a3437870SNishanth Aravamudan }
4326a3437870SNishanth Aravamudan
4327b5389086SZhenguo Yao bool __init __weak hugetlb_node_alloc_supported(void)
4328b5389086SZhenguo Yao {
4329b5389086SZhenguo Yao return true;
4330b5389086SZhenguo Yao }
4331f87442f4SPeng Liu
4332f87442f4SPeng Liu static void __init hugepages_clear_pages_in_node(void)
4333f87442f4SPeng Liu {
4334f87442f4SPeng Liu if (!hugetlb_max_hstate) {
4335f87442f4SPeng Liu default_hstate_max_huge_pages = 0;
4336f87442f4SPeng Liu memset(default_hugepages_in_node, 0,
433710395680SMiaohe Lin sizeof(default_hugepages_in_node));
4338f87442f4SPeng Liu } else {
4339f87442f4SPeng Liu parsed_hstate->max_huge_pages = 0;
4340f87442f4SPeng Liu memset(parsed_hstate->max_huge_pages_node, 0,
434110395680SMiaohe Lin sizeof(parsed_hstate->max_huge_pages_node));
4342f87442f4SPeng Liu }
4343f87442f4SPeng Liu }
4344f87442f4SPeng Liu
4345282f4214SMike Kravetz /*
4346282f4214SMike Kravetz * hugepages command line processing
4347282f4214SMike Kravetz  * hugepages normally follows a valid hugepagesz or default_hugepagesz
4348282f4214SMike Kravetz * specification. If not, ignore the hugepages value. hugepages can also
4349282f4214SMike Kravetz * be the first huge page command line option in which case it implicitly
4350282f4214SMike Kravetz * specifies the number of huge pages for the default size.
4351282f4214SMike Kravetz */
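/*
 * A few illustrative (not exhaustive) command line forms accepted by the
 * parser below; the sizes and counts are arbitrary examples:
 *
 *   hugepages=512                512 huge pages of the default size
 *   hugepagesz=1G hugepages=4    4 huge pages of 1GB size
 *   hugepages=0:256,1:256        per-node counts ("node format"), assuming
 *                                nodes 0 and 1 are online
 */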
4352282f4214SMike Kravetz static int __init hugepages_setup(char *s)
4353a3437870SNishanth Aravamudan {
4354a3437870SNishanth Aravamudan unsigned long *mhp;
43558faa8b07SAndi Kleen static unsigned long *last_mhp;
4356b5389086SZhenguo Yao int node = NUMA_NO_NODE;
4357b5389086SZhenguo Yao int count;
4358b5389086SZhenguo Yao unsigned long tmp;
4359b5389086SZhenguo Yao char *p = s;
4360a3437870SNishanth Aravamudan
43619fee021dSVaishali Thakkar if (!parsed_valid_hugepagesz) {
4362282f4214SMike Kravetz pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
43639fee021dSVaishali Thakkar parsed_valid_hugepagesz = true;
4364f81f6e4bSPeng Liu return 1;
43659fee021dSVaishali Thakkar }
4366282f4214SMike Kravetz
4367a3437870SNishanth Aravamudan /*
4368282f4214SMike Kravetz * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4369282f4214SMike Kravetz * yet, so this hugepages= parameter goes to the "default hstate".
4370282f4214SMike Kravetz * Otherwise, it goes with the previously parsed hugepagesz or
4371282f4214SMike Kravetz * default_hugepagesz.
4372a3437870SNishanth Aravamudan */
43739fee021dSVaishali Thakkar else if (!hugetlb_max_hstate)
4374a3437870SNishanth Aravamudan mhp = &default_hstate_max_huge_pages;
4375a3437870SNishanth Aravamudan else
4376a3437870SNishanth Aravamudan mhp = &parsed_hstate->max_huge_pages;
4377a3437870SNishanth Aravamudan
43788faa8b07SAndi Kleen if (mhp == last_mhp) {
4379282f4214SMike Kravetz pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4380f81f6e4bSPeng Liu return 1;
43818faa8b07SAndi Kleen }
43828faa8b07SAndi Kleen
4383b5389086SZhenguo Yao while (*p) {
4384b5389086SZhenguo Yao count = 0;
4385b5389086SZhenguo Yao if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4386b5389086SZhenguo Yao goto invalid;
4387b5389086SZhenguo Yao /* Parameter is node format */
4388b5389086SZhenguo Yao if (p[count] == ':') {
4389b5389086SZhenguo Yao if (!hugetlb_node_alloc_supported()) {
4390b5389086SZhenguo Yao pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4391f81f6e4bSPeng Liu return 1;
4392b5389086SZhenguo Yao }
43930a7a0f6fSPeng Liu if (tmp >= MAX_NUMNODES || !node_online(tmp))
4394e79ce983SLiu Yuntao goto invalid;
43950a7a0f6fSPeng Liu node = array_index_nospec(tmp, MAX_NUMNODES);
4396b5389086SZhenguo Yao p += count + 1;
4397b5389086SZhenguo Yao /* Parse hugepages */
4398b5389086SZhenguo Yao if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4399b5389086SZhenguo Yao goto invalid;
4400b5389086SZhenguo Yao if (!hugetlb_max_hstate)
4401b5389086SZhenguo Yao default_hugepages_in_node[node] = tmp;
4402b5389086SZhenguo Yao else
4403b5389086SZhenguo Yao parsed_hstate->max_huge_pages_node[node] = tmp;
4404b5389086SZhenguo Yao *mhp += tmp;
4405b5389086SZhenguo Yao 		/* Go to parse next node */
4406b5389086SZhenguo Yao if (p[count] == ',')
4407b5389086SZhenguo Yao p += count + 1;
4408b5389086SZhenguo Yao else
4409b5389086SZhenguo Yao break;
4410b5389086SZhenguo Yao } else {
4411b5389086SZhenguo Yao if (p != s)
4412b5389086SZhenguo Yao goto invalid;
4413b5389086SZhenguo Yao *mhp = tmp;
4414b5389086SZhenguo Yao break;
4415b5389086SZhenguo Yao }
4416b5389086SZhenguo Yao }
4417a3437870SNishanth Aravamudan
44188faa8b07SAndi Kleen /*
44198faa8b07SAndi Kleen * Global state is always initialized later in hugetlb_init.
442004adbc3fSMiaohe Lin * But we need to allocate gigantic hstates here early to still
44218faa8b07SAndi Kleen * use the bootmem allocator.
44228faa8b07SAndi Kleen */
442304adbc3fSMiaohe Lin if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
44248faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(parsed_hstate);
44258faa8b07SAndi Kleen
44268faa8b07SAndi Kleen last_mhp = mhp;
44278faa8b07SAndi Kleen
4428a3437870SNishanth Aravamudan return 1;
4429b5389086SZhenguo Yao
4430b5389086SZhenguo Yao invalid:
4431b5389086SZhenguo Yao pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4432f87442f4SPeng Liu hugepages_clear_pages_in_node();
4433f81f6e4bSPeng Liu return 1;
4434a3437870SNishanth Aravamudan }
4435282f4214SMike Kravetz __setup("hugepages=", hugepages_setup);
4436e11bfbfcSNick Piggin
4437282f4214SMike Kravetz /*
4438282f4214SMike Kravetz * hugepagesz command line processing
4439282f4214SMike Kravetz * A specific huge page size can only be specified once with hugepagesz.
4440282f4214SMike Kravetz * hugepagesz is followed by hugepages on the command line. The global
4441282f4214SMike Kravetz * variable 'parsed_valid_hugepagesz' is used to determine if prior
4442282f4214SMike Kravetz * hugepagesz argument was valid.
4443282f4214SMike Kravetz */
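/*
 * Illustrative example: "hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512"
 * sets up two hstates; each hugepages= count applies to the hugepagesz= that
 * precedes it.
 */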
4444359f2544SMike Kravetz static int __init hugepagesz_setup(char *s)
4445e11bfbfcSNick Piggin {
4446359f2544SMike Kravetz unsigned long size;
4447282f4214SMike Kravetz struct hstate *h;
4448282f4214SMike Kravetz
4449282f4214SMike Kravetz parsed_valid_hugepagesz = false;
4450359f2544SMike Kravetz size = (unsigned long)memparse(s, NULL);
4451359f2544SMike Kravetz
4452359f2544SMike Kravetz if (!arch_hugetlb_valid_size(size)) {
4453282f4214SMike Kravetz pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4454f81f6e4bSPeng Liu return 1;
4455359f2544SMike Kravetz }
4456359f2544SMike Kravetz
4457282f4214SMike Kravetz h = size_to_hstate(size);
4458282f4214SMike Kravetz if (h) {
4459282f4214SMike Kravetz /*
4460282f4214SMike Kravetz * hstate for this size already exists. This is normally
4461282f4214SMike Kravetz * an error, but is allowed if the existing hstate is the
4462282f4214SMike Kravetz * default hstate. More specifically, it is only allowed if
4463282f4214SMike Kravetz * the number of huge pages for the default hstate was not
4464282f4214SMike Kravetz * previously specified.
4465282f4214SMike Kravetz */
4466282f4214SMike Kravetz if (!parsed_default_hugepagesz || h != &default_hstate ||
4467282f4214SMike Kravetz default_hstate.max_huge_pages) {
4468282f4214SMike Kravetz pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4469f81f6e4bSPeng Liu return 1;
447038237830SMike Kravetz }
447138237830SMike Kravetz
4472282f4214SMike Kravetz /*
4473282f4214SMike Kravetz * No need to call hugetlb_add_hstate() as hstate already
4474282f4214SMike Kravetz * exists. But, do set parsed_hstate so that a following
4475282f4214SMike Kravetz * hugepages= parameter will be applied to this hstate.
4476282f4214SMike Kravetz */
4477282f4214SMike Kravetz parsed_hstate = h;
4478282f4214SMike Kravetz parsed_valid_hugepagesz = true;
4479e11bfbfcSNick Piggin return 1;
4480e11bfbfcSNick Piggin }
4481282f4214SMike Kravetz
4482359f2544SMike Kravetz hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4483282f4214SMike Kravetz parsed_valid_hugepagesz = true;
4484359f2544SMike Kravetz return 1;
4485359f2544SMike Kravetz }
4486359f2544SMike Kravetz __setup("hugepagesz=", hugepagesz_setup);
4487359f2544SMike Kravetz
4488282f4214SMike Kravetz /*
4489282f4214SMike Kravetz * default_hugepagesz command line input
4490282f4214SMike Kravetz * Only one instance of default_hugepagesz allowed on command line.
4491282f4214SMike Kravetz */
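/*
 * Illustrative example: with "hugepages=8 default_hugepagesz=1G", the count
 * was recorded in default_hstate_max_huge_pages by hugepages_setup(), and the
 * code below makes the 1GB hstate the default and, because 1GB pages are
 * gigantic, allocates those 8 pages from the bootmem allocator.
 */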
4492ae94da89SMike Kravetz static int __init default_hugepagesz_setup(char *s)
4493e11bfbfcSNick Piggin {
4494ae94da89SMike Kravetz unsigned long size;
4495b5389086SZhenguo Yao int i;
4496ae94da89SMike Kravetz
4497282f4214SMike Kravetz parsed_valid_hugepagesz = false;
4498282f4214SMike Kravetz if (parsed_default_hugepagesz) {
4499282f4214SMike Kravetz pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4500f81f6e4bSPeng Liu return 1;
4501282f4214SMike Kravetz }
4502282f4214SMike Kravetz
4503282f4214SMike Kravetz size = (unsigned long)memparse(s, NULL);
4504282f4214SMike Kravetz
4505282f4214SMike Kravetz if (!arch_hugetlb_valid_size(size)) {
4506282f4214SMike Kravetz pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4507f81f6e4bSPeng Liu return 1;
4508282f4214SMike Kravetz }
4509282f4214SMike Kravetz
4510282f4214SMike Kravetz hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4511282f4214SMike Kravetz parsed_valid_hugepagesz = true;
4512282f4214SMike Kravetz parsed_default_hugepagesz = true;
4513282f4214SMike Kravetz default_hstate_idx = hstate_index(size_to_hstate(size));
4514282f4214SMike Kravetz
4515282f4214SMike Kravetz /*
4516282f4214SMike Kravetz * The number of default huge pages (for this size) could have been
4517282f4214SMike Kravetz * specified as the first hugetlb parameter: hugepages=X. If so,
4518282f4214SMike Kravetz * then default_hstate_max_huge_pages is set. If the default huge
451923baf831SKirill A. Shutemov * page size is gigantic (> MAX_ORDER), then the pages must be
4520282f4214SMike Kravetz * allocated here from bootmem allocator.
4521282f4214SMike Kravetz */
4522282f4214SMike Kravetz if (default_hstate_max_huge_pages) {
4523282f4214SMike Kravetz default_hstate.max_huge_pages = default_hstate_max_huge_pages;
45240a7a0f6fSPeng Liu for_each_online_node(i)
4525b5389086SZhenguo Yao default_hstate.max_huge_pages_node[i] =
4526b5389086SZhenguo Yao default_hugepages_in_node[i];
4527282f4214SMike Kravetz if (hstate_is_gigantic(&default_hstate))
4528282f4214SMike Kravetz hugetlb_hstate_alloc_pages(&default_hstate);
4529282f4214SMike Kravetz default_hstate_max_huge_pages = 0;
4530282f4214SMike Kravetz }
4531282f4214SMike Kravetz
4532e11bfbfcSNick Piggin return 1;
4533e11bfbfcSNick Piggin }
4534ae94da89SMike Kravetz __setup("default_hugepagesz=", default_hugepagesz_setup);
4535a3437870SNishanth Aravamudan
4536d2226ebdSFeng Tang static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
4537d2226ebdSFeng Tang {
4538d2226ebdSFeng Tang #ifdef CONFIG_NUMA
4539d2226ebdSFeng Tang struct mempolicy *mpol = get_task_policy(current);
4540d2226ebdSFeng Tang
4541d2226ebdSFeng Tang /*
4542d2226ebdSFeng Tang * Only enforce MPOL_BIND policy which overlaps with cpuset policy
4543d2226ebdSFeng Tang * (from policy_nodemask) specifically for hugetlb case
4544d2226ebdSFeng Tang */
4545d2226ebdSFeng Tang if (mpol->mode == MPOL_BIND &&
4546d2226ebdSFeng Tang (apply_policy_zone(mpol, gfp_zone(gfp)) &&
4547d2226ebdSFeng Tang cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
4548d2226ebdSFeng Tang return &mpol->nodes;
4549d2226ebdSFeng Tang #endif
4550d2226ebdSFeng Tang return NULL;
4551d2226ebdSFeng Tang }
4552d2226ebdSFeng Tang
45538ca39e68SMuchun Song static unsigned int allowed_mems_nr(struct hstate *h)
45548a213460SNishanth Aravamudan {
45558a213460SNishanth Aravamudan int node;
45568a213460SNishanth Aravamudan unsigned int nr = 0;
4557d2226ebdSFeng Tang nodemask_t *mbind_nodemask;
45588ca39e68SMuchun Song unsigned int *array = h->free_huge_pages_node;
45598ca39e68SMuchun Song gfp_t gfp_mask = htlb_alloc_mask(h);
45608a213460SNishanth Aravamudan
4561d2226ebdSFeng Tang mbind_nodemask = policy_mbind_nodemask(gfp_mask);
45628ca39e68SMuchun Song for_each_node_mask(node, cpuset_current_mems_allowed) {
4563d2226ebdSFeng Tang if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
45648a213460SNishanth Aravamudan nr += array[node];
45658ca39e68SMuchun Song }
45668a213460SNishanth Aravamudan
45678a213460SNishanth Aravamudan return nr;
45688a213460SNishanth Aravamudan }
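/*
 * Illustrative example (hypothetical values): with cpuset mems_allowed of
 * nodes 0-1 and an MPOL_BIND policy restricted to node 1, only
 * free_huge_pages_node[1] is summed, so free counts of { 4, 2 } on nodes
 * 0 and 1 make allowed_mems_nr() return 2.
 */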
45698a213460SNishanth Aravamudan
45708a213460SNishanth Aravamudan #ifdef CONFIG_SYSCTL
457217743798SMuchun Song static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
457217743798SMuchun Song void *buffer, size_t *length,
457317743798SMuchun Song loff_t *ppos, unsigned long *out)
457417743798SMuchun Song {
457517743798SMuchun Song struct ctl_table dup_table;
457617743798SMuchun Song
457717743798SMuchun Song /*
457817743798SMuchun Song * In order to avoid races with __do_proc_doulongvec_minmax(), we
457917743798SMuchun Song * can duplicate the @table and alter the duplicate of it.
458017743798SMuchun Song */
458117743798SMuchun Song dup_table = *table;
458217743798SMuchun Song dup_table.data = out;
458317743798SMuchun Song
458417743798SMuchun Song return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
458517743798SMuchun Song }
458617743798SMuchun Song
458706808b08SLee Schermerhorn static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
458806808b08SLee Schermerhorn struct ctl_table *table, int write,
458932927393SChristoph Hellwig void *buffer, size_t *length, loff_t *ppos)
45901da177e4SLinus Torvalds {
4591e5ff2159SAndi Kleen struct hstate *h = &default_hstate;
4592238d3c13SDavid Rientjes unsigned long tmp = h->max_huge_pages;
459308d4a246SMichal Hocko int ret;
4594e5ff2159SAndi Kleen
4595457c1b27SNishanth Aravamudan if (!hugepages_supported())
459686613628SJan Stancek return -EOPNOTSUPP;
4597457c1b27SNishanth Aravamudan
459817743798SMuchun Song ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
459917743798SMuchun Song &tmp);
460008d4a246SMichal Hocko if (ret)
460108d4a246SMichal Hocko goto out;
4602e5ff2159SAndi Kleen
4603238d3c13SDavid Rientjes if (write)
4604238d3c13SDavid Rientjes ret = __nr_hugepages_store_common(obey_mempolicy, h,
4605238d3c13SDavid Rientjes NUMA_NO_NODE, tmp, *length);
460608d4a246SMichal Hocko out:
460708d4a246SMichal Hocko return ret;
46081da177e4SLinus Torvalds }
4609396faf03SMel Gorman
4610962de548SKefeng Wang static int hugetlb_sysctl_handler(struct ctl_table *table, int write,
461132927393SChristoph Hellwig void *buffer, size_t *length, loff_t *ppos)
461206808b08SLee Schermerhorn {
461306808b08SLee Schermerhorn
461406808b08SLee Schermerhorn return hugetlb_sysctl_handler_common(false, table, write,
461506808b08SLee Schermerhorn buffer, length, ppos);
461606808b08SLee Schermerhorn }
461706808b08SLee Schermerhorn
461806808b08SLee Schermerhorn #ifdef CONFIG_NUMA
4619962de548SKefeng Wang static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
462032927393SChristoph Hellwig void *buffer, size_t *length, loff_t *ppos)
462106808b08SLee Schermerhorn {
462206808b08SLee Schermerhorn return hugetlb_sysctl_handler_common(true, table, write,
462306808b08SLee Schermerhorn buffer, length, ppos);
462406808b08SLee Schermerhorn }
462506808b08SLee Schermerhorn #endif /* CONFIG_NUMA */
462606808b08SLee Schermerhorn
4627962de548SKefeng Wang static int hugetlb_overcommit_handler(struct ctl_table *table, int write,
462832927393SChristoph Hellwig void *buffer, size_t *length, loff_t *ppos)
4629a3d0c6aaSNishanth Aravamudan {
4630a5516438SAndi Kleen struct hstate *h = &default_hstate;
4631e5ff2159SAndi Kleen unsigned long tmp;
463208d4a246SMichal Hocko int ret;
4633e5ff2159SAndi Kleen
4634457c1b27SNishanth Aravamudan if (!hugepages_supported())
463586613628SJan Stancek return -EOPNOTSUPP;
4636457c1b27SNishanth Aravamudan
4637e5ff2159SAndi Kleen tmp = h->nr_overcommit_huge_pages;
4638e5ff2159SAndi Kleen
4639bae7f4aeSLuiz Capitulino if (write && hstate_is_gigantic(h))
4640adbe8726SEric B Munson return -EINVAL;
4641adbe8726SEric B Munson
464217743798SMuchun Song ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
464317743798SMuchun Song &tmp);
464408d4a246SMichal Hocko if (ret)
464508d4a246SMichal Hocko goto out;
4646e5ff2159SAndi Kleen
4647e5ff2159SAndi Kleen if (write) {
4648db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
4649e5ff2159SAndi Kleen h->nr_overcommit_huge_pages = tmp;
4650db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
4651e5ff2159SAndi Kleen }
465208d4a246SMichal Hocko out:
465308d4a246SMichal Hocko return ret;
4654a3d0c6aaSNishanth Aravamudan }
4655a3d0c6aaSNishanth Aravamudan
4656962de548SKefeng Wang static struct ctl_table hugetlb_table[] = {
4657962de548SKefeng Wang {
4658962de548SKefeng Wang .procname = "nr_hugepages",
4659962de548SKefeng Wang .data = NULL,
4660962de548SKefeng Wang .maxlen = sizeof(unsigned long),
4661962de548SKefeng Wang .mode = 0644,
4662962de548SKefeng Wang .proc_handler = hugetlb_sysctl_handler,
4663962de548SKefeng Wang },
4664962de548SKefeng Wang #ifdef CONFIG_NUMA
4665962de548SKefeng Wang {
4666962de548SKefeng Wang .procname = "nr_hugepages_mempolicy",
4667962de548SKefeng Wang .data = NULL,
4668962de548SKefeng Wang .maxlen = sizeof(unsigned long),
4669962de548SKefeng Wang .mode = 0644,
4670962de548SKefeng Wang .proc_handler = &hugetlb_mempolicy_sysctl_handler,
4671962de548SKefeng Wang },
4672962de548SKefeng Wang #endif
4673962de548SKefeng Wang {
4674962de548SKefeng Wang .procname = "hugetlb_shm_group",
4675962de548SKefeng Wang .data = &sysctl_hugetlb_shm_group,
4676962de548SKefeng Wang .maxlen = sizeof(gid_t),
4677962de548SKefeng Wang .mode = 0644,
4678962de548SKefeng Wang .proc_handler = proc_dointvec,
4679962de548SKefeng Wang },
4680962de548SKefeng Wang {
4681962de548SKefeng Wang .procname = "nr_overcommit_hugepages",
4682962de548SKefeng Wang .data = NULL,
4683962de548SKefeng Wang .maxlen = sizeof(unsigned long),
4684962de548SKefeng Wang .mode = 0644,
4685962de548SKefeng Wang .proc_handler = hugetlb_overcommit_handler,
4686962de548SKefeng Wang },
4687962de548SKefeng Wang { }
4688962de548SKefeng Wang };
4689962de548SKefeng Wang
4690962de548SKefeng Wang static void hugetlb_sysctl_init(void)
4691962de548SKefeng Wang {
4692962de548SKefeng Wang register_sysctl_init("vm", hugetlb_table);
4693962de548SKefeng Wang }
46941da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */
46951da177e4SLinus Torvalds
4696e1759c21SAlexey Dobriyan void hugetlb_report_meminfo(struct seq_file *m)
46971da177e4SLinus Torvalds {
4698fcb2b0c5SRoman Gushchin struct hstate *h;
4699fcb2b0c5SRoman Gushchin unsigned long total = 0;
4700fcb2b0c5SRoman Gushchin
4701457c1b27SNishanth Aravamudan if (!hugepages_supported())
4702457c1b27SNishanth Aravamudan return;
4703fcb2b0c5SRoman Gushchin
4704fcb2b0c5SRoman Gushchin for_each_hstate(h) {
4705fcb2b0c5SRoman Gushchin unsigned long count = h->nr_huge_pages;
4706fcb2b0c5SRoman Gushchin
4707aca78307SMiaohe Lin total += huge_page_size(h) * count;
4708fcb2b0c5SRoman Gushchin
4709fcb2b0c5SRoman Gushchin if (h == &default_hstate)
4710e1759c21SAlexey Dobriyan seq_printf(m,
47111da177e4SLinus Torvalds "HugePages_Total: %5lu\n"
47121da177e4SLinus Torvalds "HugePages_Free: %5lu\n"
4713b45b5bd6SDavid Gibson "HugePages_Rsvd: %5lu\n"
47147893d1d5SAdam Litke "HugePages_Surp: %5lu\n"
47154f98a2feSRik van Riel "Hugepagesize: %8lu kB\n",
4716fcb2b0c5SRoman Gushchin count,
4717a5516438SAndi Kleen h->free_huge_pages,
4718a5516438SAndi Kleen h->resv_huge_pages,
4719a5516438SAndi Kleen h->surplus_huge_pages,
4720aca78307SMiaohe Lin huge_page_size(h) / SZ_1K);
4721fcb2b0c5SRoman Gushchin }
4722fcb2b0c5SRoman Gushchin
4723aca78307SMiaohe Lin seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K);
47241da177e4SLinus Torvalds }
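/*
 * Example of the /proc/meminfo output produced above (illustrative values,
 * 2048kB default hstate and no other hstates populated):
 *
 *   HugePages_Total:      64
 *   HugePages_Free:       32
 *   HugePages_Rsvd:        0
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 *   Hugetlb:          131072 kB
 */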
47251da177e4SLinus Torvalds
47267981593bSJoe Perches int hugetlb_report_node_meminfo(char *buf, int len, int nid)
47271da177e4SLinus Torvalds {
4728a5516438SAndi Kleen struct hstate *h = &default_hstate;
47297981593bSJoe Perches
4730457c1b27SNishanth Aravamudan if (!hugepages_supported())
4731457c1b27SNishanth Aravamudan return 0;
47327981593bSJoe Perches
47337981593bSJoe Perches return sysfs_emit_at(buf, len,
47341da177e4SLinus Torvalds "Node %d HugePages_Total: %5u\n"
4735a1de0919SNishanth Aravamudan "Node %d HugePages_Free: %5u\n"
4736a1de0919SNishanth Aravamudan "Node %d HugePages_Surp: %5u\n",
4737a5516438SAndi Kleen nid, h->nr_huge_pages_node[nid],
4738a5516438SAndi Kleen nid, h->free_huge_pages_node[nid],
4739a5516438SAndi Kleen nid, h->surplus_huge_pages_node[nid]);
47401da177e4SLinus Torvalds }
47411da177e4SLinus Torvalds
4742dcadcf1cSGang Li void hugetlb_show_meminfo_node(int nid)
4743949f7ec5SDavid Rientjes {
4744949f7ec5SDavid Rientjes struct hstate *h;
4745949f7ec5SDavid Rientjes
4746457c1b27SNishanth Aravamudan if (!hugepages_supported())
4747457c1b27SNishanth Aravamudan return;
4748457c1b27SNishanth Aravamudan
4749949f7ec5SDavid Rientjes for_each_hstate(h)
4750dcadcf1cSGang Li printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4751949f7ec5SDavid Rientjes nid,
4752949f7ec5SDavid Rientjes h->nr_huge_pages_node[nid],
4753949f7ec5SDavid Rientjes h->free_huge_pages_node[nid],
4754949f7ec5SDavid Rientjes h->surplus_huge_pages_node[nid],
4755aca78307SMiaohe Lin huge_page_size(h) / SZ_1K);
4756949f7ec5SDavid Rientjes }
4757949f7ec5SDavid Rientjes
47585d317b2bSNaoya Horiguchi void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
47595d317b2bSNaoya Horiguchi {
47605d317b2bSNaoya Horiguchi seq_printf(m, "HugetlbPages:\t%8lu kB\n",
47616c1aa2d3SZhangPeng K(atomic_long_read(&mm->hugetlb_usage)));
47625d317b2bSNaoya Horiguchi }
47635d317b2bSNaoya Horiguchi
47641da177e4SLinus Torvalds /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
47651da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void)
47661da177e4SLinus Torvalds {
4767d0028588SWanpeng Li struct hstate *h;
4768d0028588SWanpeng Li unsigned long nr_total_pages = 0;
4769d0028588SWanpeng Li
4770d0028588SWanpeng Li for_each_hstate(h)
4771d0028588SWanpeng Li nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4772d0028588SWanpeng Li return nr_total_pages;
47731da177e4SLinus Torvalds }
47741da177e4SLinus Torvalds
4775a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta)
4776fc1b8a73SMel Gorman {
4777fc1b8a73SMel Gorman int ret = -ENOMEM;
4778fc1b8a73SMel Gorman
47790aa7f354SMiaohe Lin if (!delta)
47800aa7f354SMiaohe Lin return 0;
47810aa7f354SMiaohe Lin
4782db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
4783fc1b8a73SMel Gorman /*
4784fc1b8a73SMel Gorman * When cpuset is configured, it breaks the strict hugetlb page
4785fc1b8a73SMel Gorman * reservation as the accounting is done on a global variable. Such
4786fc1b8a73SMel Gorman * reservation is completely rubbish in the presence of cpuset because
4787fc1b8a73SMel Gorman * the reservation is not checked against page availability for the
4788fc1b8a73SMel Gorman  * current cpuset. The application can still potentially be OOM'ed by the
4789fc1b8a73SMel Gorman  * kernel due to a lack of free hugetlb pages in the cpuset the task is in.
4790fc1b8a73SMel Gorman  * Attempting to enforce strict accounting with cpuset is almost
4791fc1b8a73SMel Gorman  * impossible (or too ugly) because cpuset is so fluid that a
4792fc1b8a73SMel Gorman  * task or memory node can be dynamically moved between cpusets.
4793fc1b8a73SMel Gorman *
4794fc1b8a73SMel Gorman * The change of semantics for shared hugetlb mapping with cpuset is
4795fc1b8a73SMel Gorman * undesirable. However, in order to preserve some of the semantics,
4796fc1b8a73SMel Gorman * we fall back to check against current free page availability as
4797fc1b8a73SMel Gorman * a best attempt and hopefully to minimize the impact of changing
4798fc1b8a73SMel Gorman * semantics that cpuset has.
47998ca39e68SMuchun Song *
48008ca39e68SMuchun Song * Apart from cpuset, we also have memory policy mechanism that
48018ca39e68SMuchun Song * also determines from which node the kernel will allocate memory
48028ca39e68SMuchun Song * in a NUMA system. So similar to cpuset, we also should consider
48038ca39e68SMuchun Song * the memory policy of the current task. Similar to the description
48048ca39e68SMuchun Song * above.
4805fc1b8a73SMel Gorman */
4806fc1b8a73SMel Gorman if (delta > 0) {
4807a5516438SAndi Kleen if (gather_surplus_pages(h, delta) < 0)
4808fc1b8a73SMel Gorman goto out;
4809fc1b8a73SMel Gorman
48108ca39e68SMuchun Song if (delta > allowed_mems_nr(h)) {
4811a5516438SAndi Kleen return_unused_surplus_pages(h, delta);
4812fc1b8a73SMel Gorman goto out;
4813fc1b8a73SMel Gorman }
4814fc1b8a73SMel Gorman }
4815fc1b8a73SMel Gorman
4816fc1b8a73SMel Gorman ret = 0;
4817fc1b8a73SMel Gorman if (delta < 0)
4818a5516438SAndi Kleen return_unused_surplus_pages(h, (unsigned long) -delta);
4819fc1b8a73SMel Gorman
4820fc1b8a73SMel Gorman out:
4821db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
4822fc1b8a73SMel Gorman return ret;
4823fc1b8a73SMel Gorman }
4824fc1b8a73SMel Gorman
482584afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma)
482684afd99bSAndy Whitcroft {
4827f522c3acSJoonsoo Kim struct resv_map *resv = vma_resv_map(vma);
482884afd99bSAndy Whitcroft
482984afd99bSAndy Whitcroft /*
4830612b8a31SMike Kravetz * HPAGE_RESV_OWNER indicates a private mapping.
483184afd99bSAndy Whitcroft * This new VMA should share its siblings reservation map if present.
483284afd99bSAndy Whitcroft * The VMA will only ever have a valid reservation map pointer where
483384afd99bSAndy Whitcroft * it is being copied for another still existing VMA. As that VMA
483425985edcSLucas De Marchi * has a reference to the reservation map it cannot disappear until
483584afd99bSAndy Whitcroft * after this open call completes. It is therefore safe to take a
483684afd99bSAndy Whitcroft * new reference here without additional locking.
483784afd99bSAndy Whitcroft */
483809a26e83SMike Kravetz if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
483909a26e83SMike Kravetz resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4840f522c3acSJoonsoo Kim kref_get(&resv->refs);
484184afd99bSAndy Whitcroft }
48428d9bfb26SMike Kravetz
4843131a79b4SMike Kravetz /*
4844131a79b4SMike Kravetz * vma_lock structure for sharable mappings is vma specific.
4845612b8a31SMike Kravetz * Clear old pointer (if copied via vm_area_dup) and allocate
4846612b8a31SMike Kravetz * new structure. Before clearing, make sure vma_lock is not
4847612b8a31SMike Kravetz * for this vma.
4848131a79b4SMike Kravetz */
4849131a79b4SMike Kravetz if (vma->vm_flags & VM_MAYSHARE) {
4850612b8a31SMike Kravetz struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
4851612b8a31SMike Kravetz
4852612b8a31SMike Kravetz if (vma_lock) {
4853612b8a31SMike Kravetz if (vma_lock->vma != vma) {
4854131a79b4SMike Kravetz vma->vm_private_data = NULL;
48558d9bfb26SMike Kravetz hugetlb_vma_lock_alloc(vma);
4856612b8a31SMike Kravetz } else
4857612b8a31SMike Kravetz pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
4858612b8a31SMike Kravetz } else
4859612b8a31SMike Kravetz hugetlb_vma_lock_alloc(vma);
486009a26e83SMike Kravetz }
4861131a79b4SMike Kravetz }
486284afd99bSAndy Whitcroft
4863a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma)
4864a1e78772SMel Gorman {
4865a5516438SAndi Kleen struct hstate *h = hstate_vma(vma);
48668d9bfb26SMike Kravetz struct resv_map *resv;
486790481622SDavid Gibson struct hugepage_subpool *spool = subpool_vma(vma);
48684e35f483SJoonsoo Kim unsigned long reserve, start, end;
48691c5ecae3SMike Kravetz long gbl_reserve;
487084afd99bSAndy Whitcroft
48718d9bfb26SMike Kravetz hugetlb_vma_lock_free(vma);
48728d9bfb26SMike Kravetz
48738d9bfb26SMike Kravetz resv = vma_resv_map(vma);
48744e35f483SJoonsoo Kim if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
48754e35f483SJoonsoo Kim return;
48764e35f483SJoonsoo Kim
4877a5516438SAndi Kleen start = vma_hugecache_offset(h, vma, vma->vm_start);
4878a5516438SAndi Kleen end = vma_hugecache_offset(h, vma, vma->vm_end);
487984afd99bSAndy Whitcroft
48804e35f483SJoonsoo Kim reserve = (end - start) - region_count(resv, start, end);
4881e9fe92aeSMina Almasry hugetlb_cgroup_uncharge_counter(resv, start, end);
48827251ff78SAdam Litke if (reserve) {
48831c5ecae3SMike Kravetz /*
48841c5ecae3SMike Kravetz * Decrement reserve counts. The global reserve count may be
48851c5ecae3SMike Kravetz * adjusted if the subpool has a minimum size.
48861c5ecae3SMike Kravetz */
48871c5ecae3SMike Kravetz gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
48881c5ecae3SMike Kravetz hugetlb_acct_memory(h, -gbl_reserve);
48897251ff78SAdam Litke }
4890e9fe92aeSMina Almasry
4891e9fe92aeSMina Almasry kref_put(&resv->refs, resv_map_release);
4892a1e78772SMel Gorman }
4893a1e78772SMel Gorman
489431383c68SDan Williams static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
489531383c68SDan Williams {
489631383c68SDan Williams if (addr & ~(huge_page_mask(hstate_vma(vma))))
489731383c68SDan Williams return -EINVAL;
4898b30c14cdSJames Houghton
4899b30c14cdSJames Houghton /*
4900b30c14cdSJames Houghton * PMD sharing is only possible for PUD_SIZE-aligned address ranges
4901b30c14cdSJames Houghton * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
4902b30c14cdSJames Houghton * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
4903b30c14cdSJames Houghton */
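	/*
	 * Illustrative example (assuming x86-64, where PUD_SIZE is 1GB):
	 * splitting a 2MB-huge-page VMA at 0x40200000 is huge page aligned
	 * but not 1GB aligned, so shared PMDs in the surrounding
	 * [0x40000000, 0x80000000) interval are unshared first.
	 */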
4904b30c14cdSJames Houghton if (addr & ~PUD_MASK) {
4905b30c14cdSJames Houghton /*
4906b30c14cdSJames Houghton * hugetlb_vm_op_split is called right before we attempt to
4907b30c14cdSJames Houghton * split the VMA. We will need to unshare PMDs in the old and
4908b30c14cdSJames Houghton * new VMAs, so let's unshare before we split.
4909b30c14cdSJames Houghton */
4910b30c14cdSJames Houghton unsigned long floor = addr & PUD_MASK;
4911b30c14cdSJames Houghton unsigned long ceil = floor + PUD_SIZE;
4912b30c14cdSJames Houghton
4913b30c14cdSJames Houghton if (floor >= vma->vm_start && ceil <= vma->vm_end)
4914b30c14cdSJames Houghton hugetlb_unshare_pmds(vma, floor, ceil);
4915b30c14cdSJames Houghton }
4916b30c14cdSJames Houghton
491731383c68SDan Williams return 0;
491831383c68SDan Williams }
491931383c68SDan Williams
492005ea8860SDan Williams static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
492105ea8860SDan Williams {
4922aca78307SMiaohe Lin return huge_page_size(hstate_vma(vma));
492305ea8860SDan Williams }
492405ea8860SDan Williams
49251da177e4SLinus Torvalds /*
49261da177e4SLinus Torvalds * We cannot handle pagefaults against hugetlb pages at all. They cause
49271da177e4SLinus Torvalds * handle_mm_fault() to try to instantiate regular-sized pages in the
49286c26d310SMiaohe Lin  * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
49291da177e4SLinus Torvalds * this far.
49301da177e4SLinus Torvalds */
4931b3ec9f33SSouptick Joarder static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
49321da177e4SLinus Torvalds {
49331da177e4SLinus Torvalds BUG();
4934d0217ac0SNick Piggin return 0;
49351da177e4SLinus Torvalds }
49361da177e4SLinus Torvalds
4937eec3636aSJane Chu /*
4938eec3636aSJane Chu * When a new function is introduced to vm_operations_struct and added
4939eec3636aSJane Chu * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4940eec3636aSJane Chu * This is because under System V memory model, mappings created via
4941eec3636aSJane Chu * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
4942eec3636aSJane Chu * their original vm_ops are overwritten with shm_vm_ops.
4943eec3636aSJane Chu */
4944f0f37e2fSAlexey Dobriyan const struct vm_operations_struct hugetlb_vm_ops = {
4945d0217ac0SNick Piggin .fault = hugetlb_vm_op_fault,
494684afd99bSAndy Whitcroft .open = hugetlb_vm_op_open,
4947a1e78772SMel Gorman .close = hugetlb_vm_op_close,
4948dd3b614fSDmitry Safonov .may_split = hugetlb_vm_op_split,
494905ea8860SDan Williams .pagesize = hugetlb_vm_op_pagesize,
49501da177e4SLinus Torvalds };
49511da177e4SLinus Torvalds
49521e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
49531e8f889bSDavid Gibson int writable)
495463551ae0SDavid Gibson {
495563551ae0SDavid Gibson pte_t entry;
495679c1c594SChristophe Leroy unsigned int shift = huge_page_shift(hstate_vma(vma));
495763551ae0SDavid Gibson
49581e8f889bSDavid Gibson if (writable) {
4959106c992aSGerald Schaefer entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
4960106c992aSGerald Schaefer vma->vm_page_prot)));
496163551ae0SDavid Gibson } else {
4962106c992aSGerald Schaefer entry = huge_pte_wrprotect(mk_huge_pte(page,
4963106c992aSGerald Schaefer vma->vm_page_prot));
496463551ae0SDavid Gibson }
496563551ae0SDavid Gibson entry = pte_mkyoung(entry);
496679c1c594SChristophe Leroy entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
496763551ae0SDavid Gibson
496863551ae0SDavid Gibson return entry;
496963551ae0SDavid Gibson }
497063551ae0SDavid Gibson
49711e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma,
49721e8f889bSDavid Gibson unsigned long address, pte_t *ptep)
49731e8f889bSDavid Gibson {
49741e8f889bSDavid Gibson pte_t entry;
49751e8f889bSDavid Gibson
4976106c992aSGerald Schaefer entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
497732f84528SChris Forbes if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
49784b3073e1SRussell King update_mmu_cache(vma, address, ptep);
49791e8f889bSDavid Gibson }
49801e8f889bSDavid Gibson
4981d5ed7444SAneesh Kumar K.V bool is_hugetlb_entry_migration(pte_t pte)
49824a705fefSNaoya Horiguchi {
49834a705fefSNaoya Horiguchi swp_entry_t swp;
49844a705fefSNaoya Horiguchi
49854a705fefSNaoya Horiguchi if (huge_pte_none(pte) || pte_present(pte))
4986d5ed7444SAneesh Kumar K.V return false;
49874a705fefSNaoya Horiguchi swp = pte_to_swp_entry(pte);
4988d79d176aSBaoquan He if (is_migration_entry(swp))
4989d5ed7444SAneesh Kumar K.V return true;
49904a705fefSNaoya Horiguchi else
4991d5ed7444SAneesh Kumar K.V return false;
49924a705fefSNaoya Horiguchi }
49934a705fefSNaoya Horiguchi
49943e5c3600SBaoquan He static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
49954a705fefSNaoya Horiguchi {
49964a705fefSNaoya Horiguchi swp_entry_t swp;
49974a705fefSNaoya Horiguchi
49984a705fefSNaoya Horiguchi if (huge_pte_none(pte) || pte_present(pte))
49993e5c3600SBaoquan He return false;
50004a705fefSNaoya Horiguchi swp = pte_to_swp_entry(pte);
5001d79d176aSBaoquan He if (is_hwpoison_entry(swp))
50023e5c3600SBaoquan He return true;
50034a705fefSNaoya Horiguchi else
50043e5c3600SBaoquan He return false;
50054a705fefSNaoya Horiguchi }
50061e8f889bSDavid Gibson
50074eae4efaSPeter Xu static void
5008ea4c353dSSidhartha Kumar hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
5009935d4f0cSRyan Roberts struct folio *new_folio, pte_t old, unsigned long sz)
50104eae4efaSPeter Xu {
50115a2f8d22SPeter Xu pte_t newpte = make_huge_pte(vma, &new_folio->page, 1);
50125a2f8d22SPeter Xu
5013ea4c353dSSidhartha Kumar __folio_mark_uptodate(new_folio);
5014d0ce0e47SSidhartha Kumar hugepage_add_new_anon_rmap(new_folio, vma, addr);
50155a2f8d22SPeter Xu if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
50165a2f8d22SPeter Xu newpte = huge_pte_mkuffd_wp(newpte);
5017935d4f0cSRyan Roberts set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
50184eae4efaSPeter Xu hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
5019ea4c353dSSidhartha Kumar folio_set_hugetlb_migratable(new_folio);
50204eae4efaSPeter Xu }
50214eae4efaSPeter Xu
502263551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
5023bc70fbf2SPeter Xu struct vm_area_struct *dst_vma,
5024bc70fbf2SPeter Xu struct vm_area_struct *src_vma)
502563551ae0SDavid Gibson {
50263aa4ed80SMiaohe Lin pte_t *src_pte, *dst_pte, entry;
5027ad27ce20SZhangPeng struct folio *pte_folio;
50281c59827dSHugh Dickins unsigned long addr;
5029bc70fbf2SPeter Xu bool cow = is_cow_mapping(src_vma->vm_flags);
5030bc70fbf2SPeter Xu struct hstate *h = hstate_vma(src_vma);
5031a5516438SAndi Kleen unsigned long sz = huge_page_size(h);
50324eae4efaSPeter Xu unsigned long npages = pages_per_huge_page(h);
5033ac46d4f3SJérôme Glisse struct mmu_notifier_range range;
5034e95a9851SMike Kravetz unsigned long last_addr_mask;
5035e8569dd2SAndreas Sandberg int ret = 0;
50361e8f889bSDavid Gibson
5037ac46d4f3SJérôme Glisse if (cow) {
50387d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
5039bc70fbf2SPeter Xu src_vma->vm_start,
5040bc70fbf2SPeter Xu src_vma->vm_end);
5041ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range);
5042e727bfd5SSuren Baghdasaryan vma_assert_write_locked(src_vma);
5043623a1ddfSDavid Hildenbrand raw_write_seqcount_begin(&src->write_protect_seq);
504440549ba8SMike Kravetz } else {
504540549ba8SMike Kravetz /*
504640549ba8SMike Kravetz * For shared mappings the vma lock must be held before
50479c67a207SPeter Xu * calling hugetlb_walk() in the src vma. Otherwise, the
504840549ba8SMike Kravetz * returned ptep could go away if part of a shared pmd and
504940549ba8SMike Kravetz * another thread calls huge_pmd_unshare.
505040549ba8SMike Kravetz */
505140549ba8SMike Kravetz hugetlb_vma_lock_read(src_vma);
5052ac46d4f3SJérôme Glisse }
5053e8569dd2SAndreas Sandberg
5054e95a9851SMike Kravetz last_addr_mask = hugetlb_mask_last_page(h);
5055bc70fbf2SPeter Xu for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
5056cb900f41SKirill A. Shutemov spinlock_t *src_ptl, *dst_ptl;
50579c67a207SPeter Xu src_pte = hugetlb_walk(src_vma, addr, sz);
5058e95a9851SMike Kravetz if (!src_pte) {
5059e95a9851SMike Kravetz addr |= last_addr_mask;
5060c74df32cSHugh Dickins continue;
5061e95a9851SMike Kravetz }
5062bc70fbf2SPeter Xu dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
5063e8569dd2SAndreas Sandberg if (!dst_pte) {
5064e8569dd2SAndreas Sandberg ret = -ENOMEM;
5065e8569dd2SAndreas Sandberg break;
5066e8569dd2SAndreas Sandberg }
5067c5c99429SLarry Woodman
50685e41540cSMike Kravetz /*
50695e41540cSMike Kravetz * If the pagetables are shared don't copy or take references.
50705e41540cSMike Kravetz *
50713aa4ed80SMiaohe Lin * dst_pte == src_pte is the common case of src/dest sharing.
50725e41540cSMike Kravetz * However, src could have 'unshared' and dst shares with
50733aa4ed80SMiaohe Lin * another vma. So page_count of ptep page is checked instead
50743aa4ed80SMiaohe Lin * to reliably determine whether pte is shared.
50755e41540cSMike Kravetz */
50763aa4ed80SMiaohe Lin if (page_count(virt_to_page(dst_pte)) > 1) {
5077e95a9851SMike Kravetz addr |= last_addr_mask;
5078c5c99429SLarry Woodman continue;
5079e95a9851SMike Kravetz }
5080c5c99429SLarry Woodman
5081cb900f41SKirill A. Shutemov dst_ptl = huge_pte_lock(h, dst, dst_pte);
5082cb900f41SKirill A. Shutemov src_ptl = huge_pte_lockptr(h, src, src_pte);
5083cb900f41SKirill A. Shutemov spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
50844a705fefSNaoya Horiguchi entry = huge_ptep_get(src_pte);
50854eae4efaSPeter Xu again:
50863aa4ed80SMiaohe Lin if (huge_pte_none(entry)) {
50875e41540cSMike Kravetz /*
50883aa4ed80SMiaohe Lin * Skip if src entry none.
50895e41540cSMike Kravetz */
50904a705fefSNaoya Horiguchi ;
5091c2cb0dccSNaoya Horiguchi } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
50925a2f8d22SPeter Xu if (!userfaultfd_wp(dst_vma))
5093c2cb0dccSNaoya Horiguchi entry = huge_pte_clear_uffd_wp(entry);
5094935d4f0cSRyan Roberts set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5095c2cb0dccSNaoya Horiguchi } else if (unlikely(is_hugetlb_entry_migration(entry))) {
50964a705fefSNaoya Horiguchi swp_entry_t swp_entry = pte_to_swp_entry(entry);
50975a2f8d22SPeter Xu bool uffd_wp = pte_swp_uffd_wp(entry);
50984a705fefSNaoya Horiguchi
50996c287605SDavid Hildenbrand if (!is_readable_migration_entry(swp_entry) && cow) {
51004a705fefSNaoya Horiguchi /*
51014a705fefSNaoya Horiguchi * COW mappings require pages in both
51024a705fefSNaoya Horiguchi * parent and child to be set to read.
51034a705fefSNaoya Horiguchi */
51044dd845b5SAlistair Popple swp_entry = make_readable_migration_entry(
51054dd845b5SAlistair Popple swp_offset(swp_entry));
51064a705fefSNaoya Horiguchi entry = swp_entry_to_pte(swp_entry);
5107bc70fbf2SPeter Xu if (userfaultfd_wp(src_vma) && uffd_wp)
51085a2f8d22SPeter Xu entry = pte_swp_mkuffd_wp(entry);
5109935d4f0cSRyan Roberts set_huge_pte_at(src, addr, src_pte, entry, sz);
51104a705fefSNaoya Horiguchi }
51115a2f8d22SPeter Xu if (!userfaultfd_wp(dst_vma))
5112bc70fbf2SPeter Xu entry = huge_pte_clear_uffd_wp(entry);
5113935d4f0cSRyan Roberts set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5114bc70fbf2SPeter Xu } else if (unlikely(is_pte_marker(entry))) {
5115af19487fSAxel Rasmussen pte_marker marker = copy_pte_marker(
5116af19487fSAxel Rasmussen pte_to_swp_entry(entry), dst_vma);
5117af19487fSAxel Rasmussen
5118af19487fSAxel Rasmussen if (marker)
5119af19487fSAxel Rasmussen set_huge_pte_at(dst, addr, dst_pte,
5120935d4f0cSRyan Roberts make_pte_marker(marker), sz);
51214a705fefSNaoya Horiguchi } else {
51224eae4efaSPeter Xu entry = huge_ptep_get(src_pte);
5123ad27ce20SZhangPeng pte_folio = page_folio(pte_page(entry));
5124ad27ce20SZhangPeng folio_get(pte_folio);
51254eae4efaSPeter Xu
51264eae4efaSPeter Xu /*
5127fb3d824dSDavid Hildenbrand * Failing to duplicate the anon rmap is a rare case
5128fb3d824dSDavid Hildenbrand * where we see pinned hugetlb pages while they're
5129fb3d824dSDavid Hildenbrand * prone to COW. We need to do the COW earlier during
5130fb3d824dSDavid Hildenbrand * fork.
51314eae4efaSPeter Xu *
51324eae4efaSPeter Xu * When pre-allocating the page or copying data, we
51334eae4efaSPeter Xu * need to be without the pgtable locks since we could
51344eae4efaSPeter Xu * sleep during the process.
51354eae4efaSPeter Xu */
5136ad27ce20SZhangPeng if (!folio_test_anon(pte_folio)) {
5137ad27ce20SZhangPeng page_dup_file_rmap(&pte_folio->page, true);
5138ad27ce20SZhangPeng } else if (page_try_dup_anon_rmap(&pte_folio->page,
5139ad27ce20SZhangPeng true, src_vma)) {
51404eae4efaSPeter Xu pte_t src_pte_old = entry;
5141d0ce0e47SSidhartha Kumar struct folio *new_folio;
51424eae4efaSPeter Xu
51434eae4efaSPeter Xu spin_unlock(src_ptl);
51444eae4efaSPeter Xu spin_unlock(dst_ptl);
51454eae4efaSPeter Xu /* Do not use the reserve as it's privately owned */
5146d0ce0e47SSidhartha Kumar new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
5147d0ce0e47SSidhartha Kumar if (IS_ERR(new_folio)) {
5148ad27ce20SZhangPeng folio_put(pte_folio);
5149d0ce0e47SSidhartha Kumar ret = PTR_ERR(new_folio);
51504eae4efaSPeter Xu break;
51514eae4efaSPeter Xu }
51521cb9dc4bSLiu Shixin ret = copy_user_large_folio(new_folio,
5153ad27ce20SZhangPeng pte_folio,
5154c0e8150eSZhangPeng addr, dst_vma);
5155ad27ce20SZhangPeng folio_put(pte_folio);
51561cb9dc4bSLiu Shixin if (ret) {
51571cb9dc4bSLiu Shixin folio_put(new_folio);
51581cb9dc4bSLiu Shixin break;
51591cb9dc4bSLiu Shixin }
51604eae4efaSPeter Xu
5161d0ce0e47SSidhartha Kumar /* Install the new hugetlb folio if src pte stable */
51624eae4efaSPeter Xu dst_ptl = huge_pte_lock(h, dst, dst_pte);
51634eae4efaSPeter Xu src_ptl = huge_pte_lockptr(h, src, src_pte);
51644eae4efaSPeter Xu spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
51654eae4efaSPeter Xu entry = huge_ptep_get(src_pte);
51664eae4efaSPeter Xu if (!pte_same(src_pte_old, entry)) {
5167bc70fbf2SPeter Xu restore_reserve_on_error(h, dst_vma, addr,
5168d2d7bb44SSidhartha Kumar new_folio);
5169d0ce0e47SSidhartha Kumar folio_put(new_folio);
51703aa4ed80SMiaohe Lin /* huge_ptep of dst_pte won't change as in child */
51714eae4efaSPeter Xu goto again;
51724eae4efaSPeter Xu }
51735a2f8d22SPeter Xu hugetlb_install_folio(dst_vma, dst_pte, addr,
5174935d4f0cSRyan Roberts new_folio, src_pte_old, sz);
51754eae4efaSPeter Xu spin_unlock(src_ptl);
51764eae4efaSPeter Xu spin_unlock(dst_ptl);
51774eae4efaSPeter Xu continue;
51784eae4efaSPeter Xu }
51794eae4efaSPeter Xu
518034ee645eSJoerg Roedel if (cow) {
51810f10851eSJérôme Glisse /*
51820f10851eSJérôme Glisse * No need to notify as we are downgrading page
51830f10851eSJérôme Glisse * table protection not changing it to point
51840f10851eSJérôme Glisse * to a new page.
51850f10851eSJérôme Glisse *
5186ee65728eSMike Rapoport * See Documentation/mm/mmu_notifier.rst
51870f10851eSJérôme Glisse */
51887f2e9525SGerald Schaefer huge_ptep_set_wrprotect(src, addr, src_pte);
518984894e1cSPeter Xu entry = huge_pte_wrprotect(entry);
519034ee645eSJoerg Roedel }
51914eae4efaSPeter Xu
51925a2f8d22SPeter Xu if (!userfaultfd_wp(dst_vma))
51935a2f8d22SPeter Xu entry = huge_pte_clear_uffd_wp(entry);
51945a2f8d22SPeter Xu
5195935d4f0cSRyan Roberts set_huge_pte_at(dst, addr, dst_pte, entry, sz);
51964eae4efaSPeter Xu hugetlb_count_add(npages, dst);
51971c59827dSHugh Dickins }
5198cb900f41SKirill A. Shutemov spin_unlock(src_ptl);
5199cb900f41SKirill A. Shutemov spin_unlock(dst_ptl);
520063551ae0SDavid Gibson }
520163551ae0SDavid Gibson
5202623a1ddfSDavid Hildenbrand if (cow) {
5203623a1ddfSDavid Hildenbrand raw_write_seqcount_end(&src->write_protect_seq);
5204ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range);
520540549ba8SMike Kravetz } else {
520640549ba8SMike Kravetz hugetlb_vma_unlock_read(src_vma);
5207623a1ddfSDavid Hildenbrand }
5208e8569dd2SAndreas Sandberg
5209e8569dd2SAndreas Sandberg return ret;
521063551ae0SDavid Gibson }
521163551ae0SDavid Gibson
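/*
 * Move a single huge PTE from old_addr to new_addr. Both page table locks
 * are taken (nested when they differ) so the entry is never visible at two
 * addresses at once.
 */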
5212550a7d60SMina Almasry static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
5213935d4f0cSRyan Roberts unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
5214935d4f0cSRyan Roberts unsigned long sz)
5215550a7d60SMina Almasry {
5216550a7d60SMina Almasry struct hstate *h = hstate_vma(vma);
5217550a7d60SMina Almasry struct mm_struct *mm = vma->vm_mm;
5218550a7d60SMina Almasry spinlock_t *src_ptl, *dst_ptl;
5219db110a99SAneesh Kumar K.V pte_t pte;
5220550a7d60SMina Almasry
5221550a7d60SMina Almasry dst_ptl = huge_pte_lock(h, mm, dst_pte);
5222550a7d60SMina Almasry src_ptl = huge_pte_lockptr(h, mm, src_pte);
5223550a7d60SMina Almasry
5224550a7d60SMina Almasry /*
5225550a7d60SMina Almasry * We don't have to worry about the ordering of src and dst ptlocks
52268651a137SLorenzo Stoakes * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
5227550a7d60SMina Almasry */
5228550a7d60SMina Almasry if (src_ptl != dst_ptl)
5229550a7d60SMina Almasry spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5230550a7d60SMina Almasry
5231550a7d60SMina Almasry pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
5232935d4f0cSRyan Roberts set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
5233550a7d60SMina Almasry
5234550a7d60SMina Almasry if (src_ptl != dst_ptl)
5235550a7d60SMina Almasry spin_unlock(src_ptl);
5236550a7d60SMina Almasry spin_unlock(dst_ptl);
5237550a7d60SMina Almasry }
5238550a7d60SMina Almasry
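/*
 * Move the hugetlb page tables covering [old_addr, old_addr + len) over to
 * new_addr (used when a hugetlb VMA is moved, e.g. by mremap()). Shared
 * PMDs are unshared rather than moved. Returns the number of bytes moved.
 */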
5239550a7d60SMina Almasry int move_hugetlb_page_tables(struct vm_area_struct *vma,
5240550a7d60SMina Almasry struct vm_area_struct *new_vma,
5241550a7d60SMina Almasry unsigned long old_addr, unsigned long new_addr,
5242550a7d60SMina Almasry unsigned long len)
5243550a7d60SMina Almasry {
5244550a7d60SMina Almasry struct hstate *h = hstate_vma(vma);
5245550a7d60SMina Almasry struct address_space *mapping = vma->vm_file->f_mapping;
5246550a7d60SMina Almasry unsigned long sz = huge_page_size(h);
5247550a7d60SMina Almasry struct mm_struct *mm = vma->vm_mm;
5248550a7d60SMina Almasry unsigned long old_end = old_addr + len;
5249e95a9851SMike Kravetz unsigned long last_addr_mask;
5250550a7d60SMina Almasry pte_t *src_pte, *dst_pte;
5251550a7d60SMina Almasry struct mmu_notifier_range range;
52523d0b95cdSBaolin Wang bool shared_pmd = false;
5253550a7d60SMina Almasry
52547d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
5255550a7d60SMina Almasry old_end);
5256550a7d60SMina Almasry adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
52573d0b95cdSBaolin Wang /*
52583d0b95cdSBaolin Wang * In case of shared PMDs, we should cover the maximum possible
52593d0b95cdSBaolin Wang * range.
52603d0b95cdSBaolin Wang */
52613d0b95cdSBaolin Wang flush_cache_range(vma, range.start, range.end);
52623d0b95cdSBaolin Wang
5263550a7d60SMina Almasry mmu_notifier_invalidate_range_start(&range);
5264e95a9851SMike Kravetz last_addr_mask = hugetlb_mask_last_page(h);
5265550a7d60SMina Almasry /* Prevent race with file truncation */
526640549ba8SMike Kravetz hugetlb_vma_lock_write(vma);
5267550a7d60SMina Almasry i_mmap_lock_write(mapping);
5268550a7d60SMina Almasry for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
52699c67a207SPeter Xu src_pte = hugetlb_walk(vma, old_addr, sz);
5270e95a9851SMike Kravetz if (!src_pte) {
5271e95a9851SMike Kravetz old_addr |= last_addr_mask;
5272e95a9851SMike Kravetz new_addr |= last_addr_mask;
5273550a7d60SMina Almasry continue;
5274e95a9851SMike Kravetz }
5275550a7d60SMina Almasry if (huge_pte_none(huge_ptep_get(src_pte)))
5276550a7d60SMina Almasry continue;
5277550a7d60SMina Almasry
52784ddb4d91SMike Kravetz if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
52793d0b95cdSBaolin Wang shared_pmd = true;
52804ddb4d91SMike Kravetz old_addr |= last_addr_mask;
52814ddb4d91SMike Kravetz new_addr |= last_addr_mask;
5282550a7d60SMina Almasry continue;
52833d0b95cdSBaolin Wang }
5284550a7d60SMina Almasry
5285550a7d60SMina Almasry dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
5286550a7d60SMina Almasry if (!dst_pte)
5287550a7d60SMina Almasry break;
5288550a7d60SMina Almasry
5289935d4f0cSRyan Roberts move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
5290550a7d60SMina Almasry }
52913d0b95cdSBaolin Wang
52923d0b95cdSBaolin Wang if (shared_pmd)
5293f720b471SKefeng Wang flush_hugetlb_tlb_range(vma, range.start, range.end);
52943d0b95cdSBaolin Wang else
5295f720b471SKefeng Wang flush_hugetlb_tlb_range(vma, old_end - len, old_end);
5296550a7d60SMina Almasry mmu_notifier_invalidate_range_end(&range);
529713e4ad2cSNadav Amit i_mmap_unlock_write(mapping);
529840549ba8SMike Kravetz hugetlb_vma_unlock_write(vma);
5299550a7d60SMina Almasry
5300550a7d60SMina Almasry return len + old_addr - old_end;
5301550a7d60SMina Almasry }
5302550a7d60SMina Almasry
53032820b0f0SRik van Riel void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
530424669e58SAneesh Kumar K.V unsigned long start, unsigned long end,
530505e90bd0SPeter Xu struct page *ref_page, zap_flags_t zap_flags)
530663551ae0SDavid Gibson {
530763551ae0SDavid Gibson struct mm_struct *mm = vma->vm_mm;
530863551ae0SDavid Gibson unsigned long address;
5309c7546f8fSDavid Gibson pte_t *ptep;
531063551ae0SDavid Gibson pte_t pte;
5311cb900f41SKirill A. Shutemov spinlock_t *ptl;
531263551ae0SDavid Gibson struct page *page;
5313a5516438SAndi Kleen struct hstate *h = hstate_vma(vma);
5314a5516438SAndi Kleen unsigned long sz = huge_page_size(h);
5315e95a9851SMike Kravetz unsigned long last_addr_mask;
5316a4a118f2SNadav Amit bool force_flush = false;
5317a5516438SAndi Kleen
531863551ae0SDavid Gibson WARN_ON(!is_vm_hugetlb_page(vma));
5319a5516438SAndi Kleen BUG_ON(start & ~huge_page_mask(h));
5320a5516438SAndi Kleen BUG_ON(end & ~huge_page_mask(h));
532163551ae0SDavid Gibson
532207e32661SAneesh Kumar K.V /*
532307e32661SAneesh Kumar K.V * This is a hugetlb vma, all the pte entries should point
532407e32661SAneesh Kumar K.V * to huge page.
532507e32661SAneesh Kumar K.V */
5326ed6a7935SPeter Zijlstra tlb_change_page_size(tlb, sz);
532724669e58SAneesh Kumar K.V tlb_start_vma(tlb, vma);
5328dff11abeSMike Kravetz
5329e95a9851SMike Kravetz last_addr_mask = hugetlb_mask_last_page(h);
5330569f48b8SHillf Danton address = start;
5331569f48b8SHillf Danton for (; address < end; address += sz) {
53329c67a207SPeter Xu ptep = hugetlb_walk(vma, address, sz);
5333e95a9851SMike Kravetz if (!ptep) {
5334e95a9851SMike Kravetz address |= last_addr_mask;
5335c7546f8fSDavid Gibson continue;
5336e95a9851SMike Kravetz }
5337c7546f8fSDavid Gibson
5338cb900f41SKirill A. Shutemov ptl = huge_pte_lock(h, mm, ptep);
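/*
 * If the PMD was shared, huge_pmd_unshare() drops the shared page table;
 * record a PUD-sized TLB flush and skip ahead past the region it covered.
 */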
53394ddb4d91SMike Kravetz if (huge_pmd_unshare(mm, vma, address, ptep)) {
534031d49da5SAneesh Kumar K.V spin_unlock(ptl);
5341a4a118f2SNadav Amit tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
5342a4a118f2SNadav Amit force_flush = true;
53434ddb4d91SMike Kravetz address |= last_addr_mask;
534431d49da5SAneesh Kumar K.V continue;
534531d49da5SAneesh Kumar K.V }
534639dde65cSChen, Kenneth W
53476629326bSHillf Danton pte = huge_ptep_get(ptep);
534831d49da5SAneesh Kumar K.V if (huge_pte_none(pte)) {
534931d49da5SAneesh Kumar K.V spin_unlock(ptl);
535031d49da5SAneesh Kumar K.V continue;
535131d49da5SAneesh Kumar K.V }
53526629326bSHillf Danton
53536629326bSHillf Danton /*
53549fbc1f63SNaoya Horiguchi * Migrating hugepage or HWPoisoned hugepage is already
53559fbc1f63SNaoya Horiguchi * unmapped and its refcount is dropped, so just clear pte here.
53566629326bSHillf Danton */
53579fbc1f63SNaoya Horiguchi if (unlikely(!pte_present(pte))) {
535805e90bd0SPeter Xu /*
535905e90bd0SPeter Xu * If the pte was wr-protected by uffd-wp in any of the
536005e90bd0SPeter Xu * swap forms, meanwhile the caller does not want to
536105e90bd0SPeter Xu * drop the uffd-wp bit in this zap, then replace the
536205e90bd0SPeter Xu * pte with a marker.
536305e90bd0SPeter Xu */
536405e90bd0SPeter Xu if (pte_swp_uffd_wp_any(pte) &&
536505e90bd0SPeter Xu !(zap_flags & ZAP_FLAG_DROP_MARKER))
536605e90bd0SPeter Xu set_huge_pte_at(mm, address, ptep,
5367935d4f0cSRyan Roberts make_pte_marker(PTE_MARKER_UFFD_WP),
5368935d4f0cSRyan Roberts sz);
536905e90bd0SPeter Xu else
53709386fac3SPunit Agrawal huge_pte_clear(mm, address, ptep, sz);
537131d49da5SAneesh Kumar K.V spin_unlock(ptl);
537231d49da5SAneesh Kumar K.V continue;
53738c4894c6SNaoya Horiguchi }
53746629326bSHillf Danton
53756629326bSHillf Danton page = pte_page(pte);
537604f2cbe3SMel Gorman /*
537704f2cbe3SMel Gorman * If a reference page is supplied, it is because a specific
537804f2cbe3SMel Gorman * page is being unmapped, not a range. Ensure the page we
537904f2cbe3SMel Gorman * are about to unmap is the actual page of interest.
538004f2cbe3SMel Gorman */
538104f2cbe3SMel Gorman if (ref_page) {
538231d49da5SAneesh Kumar K.V if (page != ref_page) {
538331d49da5SAneesh Kumar K.V spin_unlock(ptl);
538431d49da5SAneesh Kumar K.V continue;
538531d49da5SAneesh Kumar K.V }
538604f2cbe3SMel Gorman /*
538704f2cbe3SMel Gorman * Mark the VMA as having unmapped its page so that
538804f2cbe3SMel Gorman * future faults in this VMA will fail rather than
538904f2cbe3SMel Gorman * looking like data was lost
539004f2cbe3SMel Gorman */
539104f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
539204f2cbe3SMel Gorman }
539304f2cbe3SMel Gorman
5394c7546f8fSDavid Gibson pte = huge_ptep_get_and_clear(mm, address, ptep);
5395b528e4b6SAneesh Kumar K.V tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5396106c992aSGerald Schaefer if (huge_pte_dirty(pte))
53976649a386SKen Chen set_page_dirty(page);
539805e90bd0SPeter Xu /* Leave a uffd-wp pte marker if needed */
539905e90bd0SPeter Xu if (huge_pte_uffd_wp(pte) &&
540005e90bd0SPeter Xu !(zap_flags & ZAP_FLAG_DROP_MARKER))
540105e90bd0SPeter Xu set_huge_pte_at(mm, address, ptep,
5402935d4f0cSRyan Roberts make_pte_marker(PTE_MARKER_UFFD_WP),
5403935d4f0cSRyan Roberts sz);
54045d317b2bSNaoya Horiguchi hugetlb_count_sub(pages_per_huge_page(h), mm);
5405cea86fe2SHugh Dickins page_remove_rmap(page, vma, true);
540631d49da5SAneesh Kumar K.V
5407cb900f41SKirill A. Shutemov spin_unlock(ptl);
5408e77b0852SAneesh Kumar K.V tlb_remove_page_size(tlb, page, huge_page_size(h));
540924669e58SAneesh Kumar K.V /*
541031d49da5SAneesh Kumar K.V * Bail out after unmapping reference page if supplied
541124669e58SAneesh Kumar K.V */
541231d49da5SAneesh Kumar K.V if (ref_page)
541331d49da5SAneesh Kumar K.V break;
5414fe1668aeSChen, Kenneth W }
541524669e58SAneesh Kumar K.V tlb_end_vma(tlb, vma);
5416a4a118f2SNadav Amit
5417a4a118f2SNadav Amit /*
5418a4a118f2SNadav Amit * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5419a4a118f2SNadav Amit * could defer the flush until now, since by holding i_mmap_rwsem we
5420a4a118f2SNadav Amit * guaranteed that the last reference would not be dropped. But we must
5421a4a118f2SNadav Amit * do the flushing before we return, as otherwise i_mmap_rwsem will be
5422a4a118f2SNadav Amit * dropped and the last reference to the shared PMDs page might be
5423a4a118f2SNadav Amit * dropped as well.
5424a4a118f2SNadav Amit *
5425a4a118f2SNadav Amit * In theory we could defer the freeing of the PMD pages as well, but
5426a4a118f2SNadav Amit * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5427a4a118f2SNadav Amit * detect sharing, so we cannot defer the release of the page either.
5428a4a118f2SNadav Amit * Instead, do flush now.
5429a4a118f2SNadav Amit */
5430a4a118f2SNadav Amit if (force_flush)
5431a4a118f2SNadav Amit tlb_flush_mmu_tlbonly(tlb);
54321da177e4SLinus Torvalds }
543363551ae0SDavid Gibson
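/*
 * Prepare to zap a hugetlb range: widen the range if PMD sharing is
 * possible, then take the hugetlb VMA lock and i_mmap_rwsem in write mode.
 * Paired with __hugetlb_zap_end() below.
 */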
54342820b0f0SRik van Riel void __hugetlb_zap_begin(struct vm_area_struct *vma,
54352820b0f0SRik van Riel unsigned long *start, unsigned long *end)
5436d833352aSMel Gorman {
54372820b0f0SRik van Riel if (!vma->vm_file) /* hugetlbfs_file_mmap error */
54382820b0f0SRik van Riel return;
5439131a79b4SMike Kravetz
54402820b0f0SRik van Riel adjust_range_if_pmd_sharing_possible(vma, start, end);
54412820b0f0SRik van Riel hugetlb_vma_lock_write(vma);
54422820b0f0SRik van Riel if (vma->vm_file)
54432820b0f0SRik van Riel i_mmap_lock_write(vma->vm_file->f_mapping);
54442820b0f0SRik van Riel }
54452820b0f0SRik van Riel
54462820b0f0SRik van Riel void __hugetlb_zap_end(struct vm_area_struct *vma,
54472820b0f0SRik van Riel struct zap_details *details)
54482820b0f0SRik van Riel {
54492820b0f0SRik van Riel zap_flags_t zap_flags = details ? details->zap_flags : 0;
54502820b0f0SRik van Riel
54512820b0f0SRik van Riel if (!vma->vm_file) /* hugetlbfs_file_mmap error */
54522820b0f0SRik van Riel return;
5453d833352aSMel Gorman
545404ada095SMike Kravetz if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */
5455d833352aSMel Gorman /*
545604ada095SMike Kravetz * Unlock and free the vma lock before releasing i_mmap_rwsem.
545704ada095SMike Kravetz * When the vma_lock is freed, this makes the vma ineligible
545804ada095SMike Kravetz * for pmd sharing. And, i_mmap_rwsem is required to set up
545904ada095SMike Kravetz * pmd sharing. This is important as page tables for this
546004ada095SMike Kravetz * unmapped range will be asynchronously deleted. If the page
546104ada095SMike Kravetz * tables are shared, there will be issues when accessed by
546204ada095SMike Kravetz * someone else.
5463d833352aSMel Gorman */
5464ecfbd733SMike Kravetz __hugetlb_vma_unlock_write_free(vma);
546504ada095SMike Kravetz } else {
546604ada095SMike Kravetz hugetlb_vma_unlock_write(vma);
546704ada095SMike Kravetz }
54682820b0f0SRik van Riel
54692820b0f0SRik van Riel if (vma->vm_file)
54702820b0f0SRik van Riel i_mmap_unlock_write(vma->vm_file->f_mapping);
5471d833352aSMel Gorman }
5472d833352aSMel Gorman
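/*
 * Unmap a hugetlb range on behalf of a single caller: set up the
 * mmu_notifier range and a local mmu_gather, then hand off to
 * __unmap_hugepage_range().
 */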
5473502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
547405e90bd0SPeter Xu unsigned long end, struct page *ref_page,
547505e90bd0SPeter Xu zap_flags_t zap_flags)
5476502717f4SChen, Kenneth W {
5477369258ceSMike Kravetz struct mmu_notifier_range range;
547824669e58SAneesh Kumar K.V struct mmu_gather tlb;
5479dff11abeSMike Kravetz
54807d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
5481369258ceSMike Kravetz start, end);
5482369258ceSMike Kravetz adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5483369258ceSMike Kravetz mmu_notifier_invalidate_range_start(&range);
5484a72afd87SWill Deacon tlb_gather_mmu(&tlb, vma->vm_mm);
5485369258ceSMike Kravetz
548605e90bd0SPeter Xu __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
5487369258ceSMike Kravetz
5488369258ceSMike Kravetz mmu_notifier_invalidate_range_end(&range);
5489ae8eba8bSWill Deacon tlb_finish_mmu(&tlb);
5490502717f4SChen, Kenneth W }
5491502717f4SChen, Kenneth W
549204f2cbe3SMel Gorman /*
549304f2cbe3SMel Gorman * This is called when the original mapper is failing to COW a MAP_PRIVATE
5494578b7725SZhiyuan Dai * mapping it owns the reserve page for. The intention is to unmap the page
549504f2cbe3SMel Gorman * from other VMAs and let the children be SIGKILLed if they are faulting the
549604f2cbe3SMel Gorman * same region.
549704f2cbe3SMel Gorman */
54982f4612afSDavidlohr Bueso static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
54992a4b3dedSHarvey Harrison struct page *page, unsigned long address)
550004f2cbe3SMel Gorman {
55017526674dSAdam Litke struct hstate *h = hstate_vma(vma);
550204f2cbe3SMel Gorman struct vm_area_struct *iter_vma;
550304f2cbe3SMel Gorman struct address_space *mapping;
550404f2cbe3SMel Gorman pgoff_t pgoff;
550504f2cbe3SMel Gorman
550604f2cbe3SMel Gorman /*
550704f2cbe3SMel Gorman * vm_pgoff is in PAGE_SIZE units, hence the different calculation
550804f2cbe3SMel Gorman * from page cache lookup which is in HPAGE_SIZE units.
550904f2cbe3SMel Gorman */
55107526674dSAdam Litke address = address & huge_page_mask(h);
551136e4f20aSMichal Hocko pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
551236e4f20aSMichal Hocko vma->vm_pgoff;
551393c76a3dSAl Viro mapping = vma->vm_file->f_mapping;
551404f2cbe3SMel Gorman
55154eb2b1dcSMel Gorman /*
55164eb2b1dcSMel Gorman * Take the mapping lock for the duration of the table walk. As
55174eb2b1dcSMel Gorman * this mapping should be shared between all the VMAs,
55184eb2b1dcSMel Gorman * __unmap_hugepage_range() is called as the lock is already held
55194eb2b1dcSMel Gorman */
552083cde9e8SDavidlohr Bueso i_mmap_lock_write(mapping);
55216b2dbba8SMichel Lespinasse vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
552204f2cbe3SMel Gorman /* Do not unmap the current VMA */
552304f2cbe3SMel Gorman if (iter_vma == vma)
552404f2cbe3SMel Gorman continue;
552504f2cbe3SMel Gorman
552604f2cbe3SMel Gorman /*
55272f84a899SMel Gorman * Shared VMAs have their own reserves and do not affect
55282f84a899SMel Gorman * MAP_PRIVATE accounting but it is possible that a shared
55292f84a899SMel Gorman * VMA is using the same page so check and skip such VMAs.
55302f84a899SMel Gorman */
55312f84a899SMel Gorman if (iter_vma->vm_flags & VM_MAYSHARE)
55322f84a899SMel Gorman continue;
55332f84a899SMel Gorman
55342f84a899SMel Gorman /*
553504f2cbe3SMel Gorman * Unmap the page from other VMAs without their own reserves.
553604f2cbe3SMel Gorman * They get marked to be SIGKILLed if they fault in these
553704f2cbe3SMel Gorman * areas. This is because a future no-page fault on this VMA
553804f2cbe3SMel Gorman * could insert a zeroed page instead of the data existing
553904f2cbe3SMel Gorman * from the time of fork. This would look like data corruption
554004f2cbe3SMel Gorman */
554104f2cbe3SMel Gorman if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
554224669e58SAneesh Kumar K.V unmap_hugepage_range(iter_vma, address,
554305e90bd0SPeter Xu address + huge_page_size(h), page, 0);
554404f2cbe3SMel Gorman }
554583cde9e8SDavidlohr Bueso i_mmap_unlock_write(mapping);
554604f2cbe3SMel Gorman }
554704f2cbe3SMel Gorman
55480fe6e20bSNaoya Horiguchi /*
5549c89357e2SDavid Hildenbrand * hugetlb_wp() should be called with page lock of the original hugepage held.
5550aa6d2e8cSBaolin Wang * Called with hugetlb_fault_mutex_table held and pte_page locked so we
5551ef009b25SMichal Hocko * cannot race with other handlers or page migration.
5552ef009b25SMichal Hocko * Keep the pte_same checks anyway to make transition from the mutex easier.
55530fe6e20bSNaoya Horiguchi */
5554c89357e2SDavid Hildenbrand static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
5555c89357e2SDavid Hildenbrand unsigned long address, pte_t *ptep, unsigned int flags,
5556371607a3SSidhartha Kumar struct folio *pagecache_folio, spinlock_t *ptl)
55571e8f889bSDavid Gibson {
5558c89357e2SDavid Hildenbrand const bool unshare = flags & FAULT_FLAG_UNSHARE;
555960d5b473SPeter Xu pte_t pte = huge_ptep_get(ptep);
5560a5516438SAndi Kleen struct hstate *h = hstate_vma(vma);
5561959a78b6SZhangPeng struct folio *old_folio;
5562d0ce0e47SSidhartha Kumar struct folio *new_folio;
55632b740303SSouptick Joarder int outside_reserve = 0;
55642b740303SSouptick Joarder vm_fault_t ret = 0;
5565974e6d66SHuang Ying unsigned long haddr = address & huge_page_mask(h);
5566ac46d4f3SJérôme Glisse struct mmu_notifier_range range;
55671e8f889bSDavid Gibson
55681d8d1464SDavid Hildenbrand /*
556960d5b473SPeter Xu * Never handle CoW for uffd-wp protected pages. It should be only
557060d5b473SPeter Xu * handled when the uffd-wp protection is removed.
557160d5b473SPeter Xu *
557260d5b473SPeter Xu * Note that only the CoW optimization path (in hugetlb_no_page())
557360d5b473SPeter Xu * can trigger this, because hugetlb_fault() will always resolve
557460d5b473SPeter Xu * uffd-wp bit first.
557560d5b473SPeter Xu */
557660d5b473SPeter Xu if (!unshare && huge_pte_uffd_wp(pte))
557760d5b473SPeter Xu return 0;
557860d5b473SPeter Xu
557960d5b473SPeter Xu /*
55801d8d1464SDavid Hildenbrand * hugetlb does not support FOLL_FORCE-style write faults that keep the
55811d8d1464SDavid Hildenbrand * PTE mapped R/O such as maybe_mkwrite() would do.
55821d8d1464SDavid Hildenbrand */
55831d8d1464SDavid Hildenbrand if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
55841d8d1464SDavid Hildenbrand return VM_FAULT_SIGSEGV;
55851d8d1464SDavid Hildenbrand
55861d8d1464SDavid Hildenbrand /* Let's take out MAP_SHARED mappings first. */
55871d8d1464SDavid Hildenbrand if (vma->vm_flags & VM_MAYSHARE) {
55881d8d1464SDavid Hildenbrand set_huge_ptep_writable(vma, haddr, ptep);
55891d8d1464SDavid Hildenbrand return 0;
55901d8d1464SDavid Hildenbrand }
55911d8d1464SDavid Hildenbrand
5592959a78b6SZhangPeng old_folio = page_folio(pte_page(pte));
55931e8f889bSDavid Gibson
5594662ce1dcSYang Yang delayacct_wpcopy_start();
5595662ce1dcSYang Yang
559604f2cbe3SMel Gorman retry_avoidcopy:
5597c89357e2SDavid Hildenbrand /*
5598c89357e2SDavid Hildenbrand * If no-one else is actually using this page, we're the exclusive
5599c89357e2SDavid Hildenbrand * owner and can reuse this page.
5600c89357e2SDavid Hildenbrand */
5601959a78b6SZhangPeng if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
5602959a78b6SZhangPeng if (!PageAnonExclusive(&old_folio->page))
5603959a78b6SZhangPeng page_move_anon_rmap(&old_folio->page, vma);
5604c89357e2SDavid Hildenbrand if (likely(!unshare))
56055b7a1d40SHuang Ying set_huge_ptep_writable(vma, haddr, ptep);
5606662ce1dcSYang Yang
5607662ce1dcSYang Yang delayacct_wpcopy_end();
560883c54070SNick Piggin return 0;
56091e8f889bSDavid Gibson }
5610959a78b6SZhangPeng VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
5611959a78b6SZhangPeng PageAnonExclusive(&old_folio->page), &old_folio->page);
56121e8f889bSDavid Gibson
561304f2cbe3SMel Gorman /*
561404f2cbe3SMel Gorman * If the process that created a MAP_PRIVATE mapping is about to
561504f2cbe3SMel Gorman * perform a COW due to a shared page count, attempt to satisfy
561604f2cbe3SMel Gorman * the allocation without using the existing reserves. The pagecache
561704f2cbe3SMel Gorman * page is used to determine if the reserve at this address was
561804f2cbe3SMel Gorman * consumed or not. If reserves were used, a partial faulted mapping
561904f2cbe3SMel Gorman * at the time of fork() could consume its reserves on COW instead
562004f2cbe3SMel Gorman * of the full address range.
562104f2cbe3SMel Gorman */
56225944d011SJoonsoo Kim if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
5623959a78b6SZhangPeng old_folio != pagecache_folio)
562404f2cbe3SMel Gorman outside_reserve = 1;
562504f2cbe3SMel Gorman
5626959a78b6SZhangPeng folio_get(old_folio);
5627b76c8cfbSLarry Woodman
5628ad4404a2SDavidlohr Bueso /*
5629ad4404a2SDavidlohr Bueso * Drop page table lock as buddy allocator may be called. It will
5630ad4404a2SDavidlohr Bueso * be acquired again before returning to the caller, as expected.
5631ad4404a2SDavidlohr Bueso */
5632cb900f41SKirill A. Shutemov spin_unlock(ptl);
5633d0ce0e47SSidhartha Kumar new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve);
56341e8f889bSDavid Gibson
5635d0ce0e47SSidhartha Kumar if (IS_ERR(new_folio)) {
563604f2cbe3SMel Gorman /*
563704f2cbe3SMel Gorman * If a process owning a MAP_PRIVATE mapping fails to COW,
563804f2cbe3SMel Gorman * it is due to references held by a child and an insufficient
563904f2cbe3SMel Gorman * huge page pool. To guarantee the original mapper's
564004f2cbe3SMel Gorman * reliability, unmap the page from child processes. The child
564104f2cbe3SMel Gorman * may get SIGKILLed if it later faults.
564204f2cbe3SMel Gorman */
564304f2cbe3SMel Gorman if (outside_reserve) {
564440549ba8SMike Kravetz struct address_space *mapping = vma->vm_file->f_mapping;
564540549ba8SMike Kravetz pgoff_t idx;
564640549ba8SMike Kravetz u32 hash;
564740549ba8SMike Kravetz
5648959a78b6SZhangPeng folio_put(old_folio);
564940549ba8SMike Kravetz /*
565040549ba8SMike Kravetz * Drop hugetlb_fault_mutex and vma_lock before
565140549ba8SMike Kravetz * unmapping. unmapping needs to hold vma_lock
565240549ba8SMike Kravetz * in write mode. Dropping vma_lock in read mode
565340549ba8SMike Kravetz * here is OK as COW mappings do not interact with
565440549ba8SMike Kravetz * PMD sharing.
565540549ba8SMike Kravetz *
565640549ba8SMike Kravetz * Reacquire both after unmap operation.
565740549ba8SMike Kravetz */
565840549ba8SMike Kravetz idx = vma_hugecache_offset(h, vma, haddr);
565940549ba8SMike Kravetz hash = hugetlb_fault_mutex_hash(mapping, idx);
566040549ba8SMike Kravetz hugetlb_vma_unlock_read(vma);
566140549ba8SMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]);
566240549ba8SMike Kravetz
5663959a78b6SZhangPeng unmap_ref_private(mm, vma, &old_folio->page, haddr);
566440549ba8SMike Kravetz
566540549ba8SMike Kravetz mutex_lock(&hugetlb_fault_mutex_table[hash]);
566640549ba8SMike Kravetz hugetlb_vma_lock_read(vma);
5667cb900f41SKirill A. Shutemov spin_lock(ptl);
56689c67a207SPeter Xu ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
5669a9af0c5dSNaoya Horiguchi if (likely(ptep &&
5670a9af0c5dSNaoya Horiguchi pte_same(huge_ptep_get(ptep), pte)))
567104f2cbe3SMel Gorman goto retry_avoidcopy;
5672a734bcc8SHillf Danton /*
5673cb900f41SKirill A. Shutemov * race occurs while re-acquiring page table
5674cb900f41SKirill A. Shutemov * lock, and our job is done.
5675a734bcc8SHillf Danton */
5676662ce1dcSYang Yang delayacct_wpcopy_end();
5677a734bcc8SHillf Danton return 0;
567804f2cbe3SMel Gorman }
567904f2cbe3SMel Gorman
5680d0ce0e47SSidhartha Kumar ret = vmf_error(PTR_ERR(new_folio));
5681ad4404a2SDavidlohr Bueso goto out_release_old;
56821e8f889bSDavid Gibson }
56831e8f889bSDavid Gibson
56840fe6e20bSNaoya Horiguchi /*
56850fe6e20bSNaoya Horiguchi * When the original hugepage is shared one, it does not have
56860fe6e20bSNaoya Horiguchi * anon_vma prepared.
56870fe6e20bSNaoya Horiguchi */
568844e2aa93SDean Nelson if (unlikely(anon_vma_prepare(vma))) {
5689ad4404a2SDavidlohr Bueso ret = VM_FAULT_OOM;
5690ad4404a2SDavidlohr Bueso goto out_release_all;
569144e2aa93SDean Nelson }
56920fe6e20bSNaoya Horiguchi
5693959a78b6SZhangPeng if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
56941cb9dc4bSLiu Shixin ret = VM_FAULT_HWPOISON_LARGE;
56951cb9dc4bSLiu Shixin goto out_release_all;
56961cb9dc4bSLiu Shixin }
5697d0ce0e47SSidhartha Kumar __folio_mark_uptodate(new_folio);
56981e8f889bSDavid Gibson
56997d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
57006f4f13e8SJérôme Glisse haddr + huge_page_size(h));
5701ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range);
5702ad4404a2SDavidlohr Bueso
5703b76c8cfbSLarry Woodman /*
5704cb900f41SKirill A. Shutemov * Retake the page table lock to check for racing updates
5705b76c8cfbSLarry Woodman * before the page tables are altered
5706b76c8cfbSLarry Woodman */
5707cb900f41SKirill A. Shutemov spin_lock(ptl);
57089c67a207SPeter Xu ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
5709a9af0c5dSNaoya Horiguchi if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
57100f230bc2SPeter Xu pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
57110f230bc2SPeter Xu
5712c89357e2SDavid Hildenbrand /* Break COW or unshare */
57135b7a1d40SHuang Ying huge_ptep_clear_flush(vma, haddr, ptep);
5714959a78b6SZhangPeng page_remove_rmap(&old_folio->page, vma, true);
5715d0ce0e47SSidhartha Kumar hugepage_add_new_anon_rmap(new_folio, vma, haddr);
57160f230bc2SPeter Xu if (huge_pte_uffd_wp(pte))
57170f230bc2SPeter Xu newpte = huge_pte_mkuffd_wp(newpte);
5718935d4f0cSRyan Roberts set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h));
5719d0ce0e47SSidhartha Kumar folio_set_hugetlb_migratable(new_folio);
57201e8f889bSDavid Gibson /* Make the old page be freed below */
5721959a78b6SZhangPeng new_folio = old_folio;
57221e8f889bSDavid Gibson }
5723cb900f41SKirill A. Shutemov spin_unlock(ptl);
5724ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range);
5725ad4404a2SDavidlohr Bueso out_release_all:
5726c89357e2SDavid Hildenbrand /*
5727c89357e2SDavid Hildenbrand * No restore in case of successful pagetable update (Break COW or
5728c89357e2SDavid Hildenbrand * unshare)
5729c89357e2SDavid Hildenbrand */
5730959a78b6SZhangPeng if (new_folio != old_folio)
5731d2d7bb44SSidhartha Kumar restore_reserve_on_error(h, vma, haddr, new_folio);
5732d0ce0e47SSidhartha Kumar folio_put(new_folio);
5733ad4404a2SDavidlohr Bueso out_release_old:
5734959a78b6SZhangPeng folio_put(old_folio);
57358312034fSJoonsoo Kim
5736ad4404a2SDavidlohr Bueso spin_lock(ptl); /* Caller expects lock to be held */
5737662ce1dcSYang Yang
5738662ce1dcSYang Yang delayacct_wpcopy_end();
5739ad4404a2SDavidlohr Bueso return ret;
57401e8f889bSDavid Gibson }
57411e8f889bSDavid Gibson
57423ae77f43SHugh Dickins /*
57433ae77f43SHugh Dickins * Return whether there is a pagecache page to back given address within VMA.
57443ae77f43SHugh Dickins */
57453ae77f43SHugh Dickins static bool hugetlbfs_pagecache_present(struct hstate *h,
57462a15efc9SHugh Dickins struct vm_area_struct *vma, unsigned long address)
57472a15efc9SHugh Dickins {
574891a2fb95SSidhartha Kumar struct address_space *mapping = vma->vm_file->f_mapping;
574991a2fb95SSidhartha Kumar pgoff_t idx = vma_hugecache_offset(h, vma, address);
5750fd4aed8dSMike Kravetz struct folio *folio;
57512a15efc9SHugh Dickins
5752fd4aed8dSMike Kravetz folio = filemap_get_folio(mapping, idx);
5753fd4aed8dSMike Kravetz if (IS_ERR(folio))
5754fd4aed8dSMike Kravetz return false;
5755fd4aed8dSMike Kravetz folio_put(folio);
5756fd4aed8dSMike Kravetz return true;
57572a15efc9SHugh Dickins }
57582a15efc9SHugh Dickins
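/*
 * Add a hugetlb folio to the inode's page cache and charge its blocks to
 * the inode. On success the folio is left locked and marked dirty so that
 * generic code paths will not drop it from the cache.
 */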
57599b91c0e2SSidhartha Kumar int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
5760ab76ad54SMike Kravetz pgoff_t idx)
5761ab76ad54SMike Kravetz {
5762ab76ad54SMike Kravetz struct inode *inode = mapping->host;
5763ab76ad54SMike Kravetz struct hstate *h = hstate_inode(inode);
5764d9ef44deSMatthew Wilcox (Oracle) int err;
5765ab76ad54SMike Kravetz
5766d9ef44deSMatthew Wilcox (Oracle) __folio_set_locked(folio);
5767d9ef44deSMatthew Wilcox (Oracle) err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
5768d9ef44deSMatthew Wilcox (Oracle)
5769d9ef44deSMatthew Wilcox (Oracle) if (unlikely(err)) {
5770d9ef44deSMatthew Wilcox (Oracle) __folio_clear_locked(folio);
5771ab76ad54SMike Kravetz return err;
5772d9ef44deSMatthew Wilcox (Oracle) }
57739b91c0e2SSidhartha Kumar folio_clear_hugetlb_restore_reserve(folio);
5774ab76ad54SMike Kravetz
577522146c3cSMike Kravetz /*
5776d9ef44deSMatthew Wilcox (Oracle) * mark folio dirty so that it will not be removed from cache/file
577722146c3cSMike Kravetz * by non-hugetlbfs specific code paths.
577822146c3cSMike Kravetz */
5779d9ef44deSMatthew Wilcox (Oracle) folio_mark_dirty(folio);
578022146c3cSMike Kravetz
5781ab76ad54SMike Kravetz spin_lock(&inode->i_lock);
5782ab76ad54SMike Kravetz inode->i_blocks += blocks_per_huge_page(h);
5783ab76ad54SMike Kravetz spin_unlock(&inode->i_lock);
5784ab76ad54SMike Kravetz return 0;
5785ab76ad54SMike Kravetz }
5786ab76ad54SMike Kravetz
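/*
 * Hand a missing/minor hugetlb fault to userfaultfd. The vma lock and the
 * fault mutex are dropped before calling handle_userfault(); callers
 * return its result directly.
 */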
57877677f7fdSAxel Rasmussen static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
57887677f7fdSAxel Rasmussen struct address_space *mapping,
57897677f7fdSAxel Rasmussen pgoff_t idx,
57907677f7fdSAxel Rasmussen unsigned int flags,
57917677f7fdSAxel Rasmussen unsigned long haddr,
5792824ddc60SNadav Amit unsigned long addr,
57937677f7fdSAxel Rasmussen unsigned long reason)
57947677f7fdSAxel Rasmussen {
57957677f7fdSAxel Rasmussen u32 hash;
57967677f7fdSAxel Rasmussen struct vm_fault vmf = {
57977677f7fdSAxel Rasmussen .vma = vma,
57987677f7fdSAxel Rasmussen .address = haddr,
5799824ddc60SNadav Amit .real_address = addr,
58007677f7fdSAxel Rasmussen .flags = flags,
58017677f7fdSAxel Rasmussen
58027677f7fdSAxel Rasmussen /*
58037677f7fdSAxel Rasmussen * Hard to debug if it ends up being
58047677f7fdSAxel Rasmussen * used by a callee that assumes
58057677f7fdSAxel Rasmussen * something about the other
58067677f7fdSAxel Rasmussen * uninitialized fields... same as in
58077677f7fdSAxel Rasmussen * memory.c
58087677f7fdSAxel Rasmussen */
58097677f7fdSAxel Rasmussen };
58107677f7fdSAxel Rasmussen
58117677f7fdSAxel Rasmussen /*
5812958f32ceSLiu Shixin * vma_lock and hugetlb_fault_mutex must be dropped before handling
5813958f32ceSLiu Shixin * userfault. Also mmap_lock could be dropped due to handling
5814958f32ceSLiu Shixin * userfault, any vma operation should be careful from here.
58157677f7fdSAxel Rasmussen */
581640549ba8SMike Kravetz hugetlb_vma_unlock_read(vma);
58177677f7fdSAxel Rasmussen hash = hugetlb_fault_mutex_hash(mapping, idx);
58187677f7fdSAxel Rasmussen mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5819958f32ceSLiu Shixin return handle_userfault(&vmf, reason);
58207677f7fdSAxel Rasmussen }
58217677f7fdSAxel Rasmussen
58222ea7ff1eSPeter Xu /*
58232ea7ff1eSPeter Xu * Recheck pte with pgtable lock. Returns true if pte didn't change, or
58242ea7ff1eSPeter Xu * false if pte changed or is changing.
58252ea7ff1eSPeter Xu */
58262ea7ff1eSPeter Xu static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
58272ea7ff1eSPeter Xu pte_t *ptep, pte_t old_pte)
58282ea7ff1eSPeter Xu {
58292ea7ff1eSPeter Xu spinlock_t *ptl;
58302ea7ff1eSPeter Xu bool same;
58312ea7ff1eSPeter Xu
58322ea7ff1eSPeter Xu ptl = huge_pte_lock(h, mm, ptep);
58332ea7ff1eSPeter Xu same = pte_same(huge_ptep_get(ptep), old_pte);
58342ea7ff1eSPeter Xu spin_unlock(ptl);
58352ea7ff1eSPeter Xu
58362ea7ff1eSPeter Xu return same;
58372ea7ff1eSPeter Xu }
58382ea7ff1eSPeter Xu
58392b740303SSouptick Joarder static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
58402b740303SSouptick Joarder struct vm_area_struct *vma,
58418382d914SDavidlohr Bueso struct address_space *mapping, pgoff_t idx,
5842c64e912cSPeter Xu unsigned long address, pte_t *ptep,
5843c64e912cSPeter Xu pte_t old_pte, unsigned int flags)
5844ac9b9c66SHugh Dickins {
5845a5516438SAndi Kleen struct hstate *h = hstate_vma(vma);
58462b740303SSouptick Joarder vm_fault_t ret = VM_FAULT_SIGBUS;
5847409eb8c2SHillf Danton int anon_rmap = 0;
58484c887265SAdam Litke unsigned long size;
5849d0ce0e47SSidhartha Kumar struct folio *folio;
58501e8f889bSDavid Gibson pte_t new_pte;
5851cb900f41SKirill A. Shutemov spinlock_t *ptl;
5852285b8dcaSHuang Ying unsigned long haddr = address & huge_page_mask(h);
5853d0ce0e47SSidhartha Kumar bool new_folio, new_pagecache_folio = false;
5854958f32ceSLiu Shixin u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
58554c887265SAdam Litke
585604f2cbe3SMel Gorman /*
585704f2cbe3SMel Gorman * Currently, we are forced to kill the process in the event the
585804f2cbe3SMel Gorman * original mapper has unmapped pages from the child due to a failed
5859c89357e2SDavid Hildenbrand * COW/unsharing. Warn that such a situation has occurred as it may not
5860c89357e2SDavid Hildenbrand * be obvious.
586104f2cbe3SMel Gorman */
586204f2cbe3SMel Gorman if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
5863910154d5SGeoffrey Thomas pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
586404f2cbe3SMel Gorman current->pid);
5865958f32ceSLiu Shixin goto out;
586604f2cbe3SMel Gorman }
586704f2cbe3SMel Gorman
58684c887265SAdam Litke /*
5869188a3972SMike Kravetz * Use page lock to guard against racing truncation
5870188a3972SMike Kravetz * before we get page_table_lock.
58714c887265SAdam Litke */
5872d0ce0e47SSidhartha Kumar new_folio = false;
5873d0ce0e47SSidhartha Kumar folio = filemap_lock_folio(mapping, idx);
587466dabbb6SChristoph Hellwig if (IS_ERR(folio)) {
5875188a3972SMike Kravetz size = i_size_read(mapping->host) >> huge_page_shift(h);
5876188a3972SMike Kravetz if (idx >= size)
5877188a3972SMike Kravetz goto out;
58787677f7fdSAxel Rasmussen /* Check for page in userfault range */
58792ea7ff1eSPeter Xu if (userfaultfd_missing(vma)) {
58802ea7ff1eSPeter Xu /*
58812ea7ff1eSPeter Xu * Since hugetlb_no_page() was examining pte
58822ea7ff1eSPeter Xu * without pgtable lock, we need to re-test under
58832ea7ff1eSPeter Xu * lock because the pte may not be stable and could
58842ea7ff1eSPeter Xu * have changed from under us. Try to detect
58852ea7ff1eSPeter Xu * either changed or during-changing ptes and retry
58862ea7ff1eSPeter Xu * properly when needed.
58872ea7ff1eSPeter Xu *
58882ea7ff1eSPeter Xu * Note that userfaultfd is actually fine with
58892ea7ff1eSPeter Xu * false positives (e.g. caused by pte changed),
58902ea7ff1eSPeter Xu * but not wrong logical events (e.g. caused by
58912ea7ff1eSPeter Xu * reading a pte during changing). The latter can
58922ea7ff1eSPeter Xu * confuse the userspace, so the strictness is very
58932ea7ff1eSPeter Xu * much preferred. E.g., MISSING event should
58942ea7ff1eSPeter Xu * never happen on the page after UFFDIO_COPY has
58952ea7ff1eSPeter Xu * correctly installed the page and returned.
58962ea7ff1eSPeter Xu */
58972ea7ff1eSPeter Xu if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
58982ea7ff1eSPeter Xu ret = 0;
58992ea7ff1eSPeter Xu goto out;
59002ea7ff1eSPeter Xu }
59012ea7ff1eSPeter Xu
59022ea7ff1eSPeter Xu return hugetlb_handle_userfault(vma, mapping, idx, flags,
59032ea7ff1eSPeter Xu haddr, address,
59047677f7fdSAxel Rasmussen VM_UFFD_MISSING);
59052ea7ff1eSPeter Xu }
59061a1aad8aSMike Kravetz
5907d0ce0e47SSidhartha Kumar folio = alloc_hugetlb_folio(vma, haddr, 0);
5908d0ce0e47SSidhartha Kumar if (IS_ERR(folio)) {
59094643d67eSMike Kravetz /*
59104643d67eSMike Kravetz * Returning error will result in faulting task being
59114643d67eSMike Kravetz * sent SIGBUS. The hugetlb fault mutex prevents two
59124643d67eSMike Kravetz * tasks from racing to fault in the same page which
59134643d67eSMike Kravetz * could result in false unable to allocate errors.
59144643d67eSMike Kravetz * Page migration does not take the fault mutex, but
59154643d67eSMike Kravetz * does a clear then write of pte's under page table
59164643d67eSMike Kravetz * lock. Page fault code could race with migration,
59174643d67eSMike Kravetz * notice the clear pte and try to allocate a page
59184643d67eSMike Kravetz * here. Before returning error, get ptl and make
59194643d67eSMike Kravetz * sure there really is no pte entry.
59204643d67eSMike Kravetz */
5921f9bf6c03SPeter Xu if (hugetlb_pte_stable(h, mm, ptep, old_pte))
5922d0ce0e47SSidhartha Kumar ret = vmf_error(PTR_ERR(folio));
5923f9bf6c03SPeter Xu else
5924f9bf6c03SPeter Xu ret = 0;
59256bda666aSChristoph Lameter goto out;
59266bda666aSChristoph Lameter }
5927d0ce0e47SSidhartha Kumar clear_huge_page(&folio->page, address, pages_per_huge_page(h));
5928d0ce0e47SSidhartha Kumar __folio_mark_uptodate(folio);
5929d0ce0e47SSidhartha Kumar new_folio = true;
5930ac9b9c66SHugh Dickins
5931f83a275dSMel Gorman if (vma->vm_flags & VM_MAYSHARE) {
59329b91c0e2SSidhartha Kumar int err = hugetlb_add_to_page_cache(folio, mapping, idx);
59336bda666aSChristoph Lameter if (err) {
59343a5497a2SMiaohe Lin /*
59353a5497a2SMiaohe Lin * err can't be -EEXIST which implies someone
59363a5497a2SMiaohe Lin * else consumed the reservation since hugetlb
59373a5497a2SMiaohe Lin * fault mutex is held when adding a hugetlb page
59383a5497a2SMiaohe Lin * to the page cache. So it's safe to call
59393a5497a2SMiaohe Lin * restore_reserve_on_error() here.
59403a5497a2SMiaohe Lin */
5941d2d7bb44SSidhartha Kumar restore_reserve_on_error(h, vma, haddr, folio);
5942d0ce0e47SSidhartha Kumar folio_put(folio);
59436bda666aSChristoph Lameter goto out;
59446bda666aSChristoph Lameter }
5945d0ce0e47SSidhartha Kumar new_pagecache_folio = true;
594623be7468SMel Gorman } else {
5947d0ce0e47SSidhartha Kumar folio_lock(folio);
59480fe6e20bSNaoya Horiguchi if (unlikely(anon_vma_prepare(vma))) {
59490fe6e20bSNaoya Horiguchi ret = VM_FAULT_OOM;
59500fe6e20bSNaoya Horiguchi goto backout_unlocked;
595123be7468SMel Gorman }
5952409eb8c2SHillf Danton anon_rmap = 1;
59530fe6e20bSNaoya Horiguchi }
59540fe6e20bSNaoya Horiguchi } else {
595557303d80SAndy Whitcroft /*
5956998b4382SNaoya Horiguchi * If a memory error occurs between mmap() and fault, some processes
5957998b4382SNaoya Horiguchi * don't have a hwpoisoned swap entry for the errored virtual address.
5958998b4382SNaoya Horiguchi * So we need to block hugepage fault by PG_hwpoison bit check.
5959fd6a03edSNaoya Horiguchi */
5960d0ce0e47SSidhartha Kumar if (unlikely(folio_test_hwpoison(folio))) {
59610eb98f15SMiaohe Lin ret = VM_FAULT_HWPOISON_LARGE |
5962972dc4deSAneesh Kumar K.V VM_FAULT_SET_HINDEX(hstate_index(h));
5963fd6a03edSNaoya Horiguchi goto backout_unlocked;
59646bda666aSChristoph Lameter }
59657677f7fdSAxel Rasmussen
59667677f7fdSAxel Rasmussen /* Check for page in userfault range. */
59677677f7fdSAxel Rasmussen if (userfaultfd_minor(vma)) {
5968d0ce0e47SSidhartha Kumar folio_unlock(folio);
5969d0ce0e47SSidhartha Kumar folio_put(folio);
59702ea7ff1eSPeter Xu /* See comment in userfaultfd_missing() block above */
59712ea7ff1eSPeter Xu if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
59722ea7ff1eSPeter Xu ret = 0;
59732ea7ff1eSPeter Xu goto out;
59742ea7ff1eSPeter Xu }
59752ea7ff1eSPeter Xu return hugetlb_handle_userfault(vma, mapping, idx, flags,
59762ea7ff1eSPeter Xu haddr, address,
59777677f7fdSAxel Rasmussen VM_UFFD_MINOR);
59787677f7fdSAxel Rasmussen }
5979998b4382SNaoya Horiguchi }
59801e8f889bSDavid Gibson
598157303d80SAndy Whitcroft /*
598257303d80SAndy Whitcroft * If we are going to COW a private mapping later, we examine the
598357303d80SAndy Whitcroft * pending reservations for this page now. This will ensure that
598457303d80SAndy Whitcroft * any allocations necessary to record that reservation occur outside
598557303d80SAndy Whitcroft * the spinlock.
598657303d80SAndy Whitcroft */
59875e911373SMike Kravetz if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5988285b8dcaSHuang Ying if (vma_needs_reservation(h, vma, haddr) < 0) {
59892b26736cSAndy Whitcroft ret = VM_FAULT_OOM;
59902b26736cSAndy Whitcroft goto backout_unlocked;
59912b26736cSAndy Whitcroft }
59925e911373SMike Kravetz /* Just decrements count, does not deallocate */
5993285b8dcaSHuang Ying vma_end_reservation(h, vma, haddr);
59945e911373SMike Kravetz }
599557303d80SAndy Whitcroft
59968bea8052SAneesh Kumar K.V ptl = huge_pte_lock(h, mm, ptep);
599783c54070SNick Piggin ret = 0;
5998c64e912cSPeter Xu /* If pte changed from under us, retry */
5999c64e912cSPeter Xu if (!pte_same(huge_ptep_get(ptep), old_pte))
60004c887265SAdam Litke goto backout;
60014c887265SAdam Litke
60024781593dSPeter Xu if (anon_rmap)
6003d0ce0e47SSidhartha Kumar hugepage_add_new_anon_rmap(folio, vma, haddr);
60044781593dSPeter Xu else
6005d0ce0e47SSidhartha Kumar page_dup_file_rmap(&folio->page, true);
6006d0ce0e47SSidhartha Kumar new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE)
60071e8f889bSDavid Gibson && (vma->vm_flags & VM_SHARED)));
6008c64e912cSPeter Xu /*
6009c64e912cSPeter Xu * If this pte was previously wr-protected, keep it wr-protected even
6010c64e912cSPeter Xu * if populated.
6011c64e912cSPeter Xu */
6012c64e912cSPeter Xu if (unlikely(pte_marker_uffd_wp(old_pte)))
6013f1eb1bacSPeter Xu new_pte = huge_pte_mkuffd_wp(new_pte);
6014935d4f0cSRyan Roberts set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h));
60151e8f889bSDavid Gibson
60165d317b2bSNaoya Horiguchi hugetlb_count_add(pages_per_huge_page(h), mm);
6017788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
60181e8f889bSDavid Gibson /* Optimization, do the COW without a second fault */
6019371607a3SSidhartha Kumar ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl);
60201e8f889bSDavid Gibson }
60211e8f889bSDavid Gibson
6022cb900f41SKirill A. Shutemov spin_unlock(ptl);
6023cb6acd01SMike Kravetz
6024cb6acd01SMike Kravetz /*
6025d0ce0e47SSidhartha Kumar * Only set hugetlb_migratable in newly allocated pages. Existing pages
6026d0ce0e47SSidhartha Kumar * found in the pagecache may not have hugetlb_migratable if they have
60278f251a3dSMike Kravetz * been isolated for migration.
6028cb6acd01SMike Kravetz */
6029d0ce0e47SSidhartha Kumar if (new_folio)
6030d0ce0e47SSidhartha Kumar folio_set_hugetlb_migratable(folio);
6031cb6acd01SMike Kravetz
6032d0ce0e47SSidhartha Kumar folio_unlock(folio);
60334c887265SAdam Litke out:
6034958f32ceSLiu Shixin hugetlb_vma_unlock_read(vma);
6035958f32ceSLiu Shixin mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6036ac9b9c66SHugh Dickins return ret;
60374c887265SAdam Litke
60384c887265SAdam Litke backout:
6039cb900f41SKirill A. Shutemov spin_unlock(ptl);
60402b26736cSAndy Whitcroft backout_unlocked:
6041d0ce0e47SSidhartha Kumar if (new_folio && !new_pagecache_folio)
6042d2d7bb44SSidhartha Kumar restore_reserve_on_error(h, vma, haddr, folio);
6043fa27759aSMike Kravetz
6044d0ce0e47SSidhartha Kumar folio_unlock(folio);
6045d0ce0e47SSidhartha Kumar folio_put(folio);
60464c887265SAdam Litke goto out;
6047ac9b9c66SHugh Dickins }
6048ac9b9c66SHugh Dickins
60498382d914SDavidlohr Bueso #ifdef CONFIG_SMP
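/*
 * Hash the mapping and page index to pick one of the hugetlb fault
 * mutexes, so faults on different pages can proceed in parallel.
 */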
6050188b04a7SWei Yang u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
60518382d914SDavidlohr Bueso {
60528382d914SDavidlohr Bueso unsigned long key[2];
60538382d914SDavidlohr Bueso u32 hash;
60548382d914SDavidlohr Bueso
60558382d914SDavidlohr Bueso key[0] = (unsigned long) mapping;
60568382d914SDavidlohr Bueso key[1] = idx;
60578382d914SDavidlohr Bueso
605855254636SMike Kravetz hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
60598382d914SDavidlohr Bueso
60608382d914SDavidlohr Bueso return hash & (num_fault_mutexes - 1);
60618382d914SDavidlohr Bueso }
60628382d914SDavidlohr Bueso #else
60638382d914SDavidlohr Bueso /*
60646c26d310SMiaohe Lin * For uniprocessor systems we always use a single mutex, so just
60658382d914SDavidlohr Bueso * return 0 and avoid the hashing overhead.
60668382d914SDavidlohr Bueso */
6067188b04a7SWei Yang u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
60688382d914SDavidlohr Bueso {
60698382d914SDavidlohr Bueso return 0;
60708382d914SDavidlohr Bueso }
60718382d914SDavidlohr Bueso #endif
60728382d914SDavidlohr Bueso
60732b740303SSouptick Joarder vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
6074788c7df4SHugh Dickins unsigned long address, unsigned int flags)
607586e5216fSAdam Litke {
60768382d914SDavidlohr Bueso pte_t *ptep, entry;
6077cb900f41SKirill A. Shutemov spinlock_t *ptl;
60782b740303SSouptick Joarder vm_fault_t ret;
60798382d914SDavidlohr Bueso u32 hash;
60808382d914SDavidlohr Bueso pgoff_t idx;
6081061e62e8SZhangPeng struct folio *folio = NULL;
6082371607a3SSidhartha Kumar struct folio *pagecache_folio = NULL;
6083a5516438SAndi Kleen struct hstate *h = hstate_vma(vma);
60848382d914SDavidlohr Bueso struct address_space *mapping;
60850f792cf9SNaoya Horiguchi int need_wait_lock = 0;
6086285b8dcaSHuang Ying unsigned long haddr = address & huge_page_mask(h);
608786e5216fSAdam Litke
60884ec31152SMatthew Wilcox (Oracle) /* TODO: Handle faults under the VMA lock */
60894ec31152SMatthew Wilcox (Oracle) if (flags & FAULT_FLAG_VMA_LOCK) {
60904ec31152SMatthew Wilcox (Oracle) vma_end_read(vma);
60914ec31152SMatthew Wilcox (Oracle) return VM_FAULT_RETRY;
60924ec31152SMatthew Wilcox (Oracle) }
60934ec31152SMatthew Wilcox (Oracle)
60943935baa9SDavid Gibson /*
60953935baa9SDavid Gibson * Serialize hugepage allocation and instantiation, so that we don't
60963935baa9SDavid Gibson * get spurious allocation failures if two CPUs race to instantiate
60973935baa9SDavid Gibson * the same page in the page cache.
60983935baa9SDavid Gibson */
609940549ba8SMike Kravetz mapping = vma->vm_file->f_mapping;
610040549ba8SMike Kravetz idx = vma_hugecache_offset(h, vma, haddr);
6101188b04a7SWei Yang hash = hugetlb_fault_mutex_hash(mapping, idx);
6102c672c7f2SMike Kravetz mutex_lock(&hugetlb_fault_mutex_table[hash]);
61038382d914SDavidlohr Bueso
610440549ba8SMike Kravetz /*
610540549ba8SMike Kravetz * Acquire vma lock before calling huge_pte_alloc and hold
610640549ba8SMike Kravetz * until finished with ptep. This prevents huge_pmd_unshare from
610740549ba8SMike Kravetz * being called elsewhere and making the ptep no longer valid.
610840549ba8SMike Kravetz */
610940549ba8SMike Kravetz hugetlb_vma_lock_read(vma);
611040549ba8SMike Kravetz ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
611140549ba8SMike Kravetz if (!ptep) {
611240549ba8SMike Kravetz hugetlb_vma_unlock_read(vma);
611340549ba8SMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]);
611440549ba8SMike Kravetz return VM_FAULT_OOM;
611540549ba8SMike Kravetz }
611640549ba8SMike Kravetz
61177f2e9525SGerald Schaefer entry = huge_ptep_get(ptep);
6118af19487fSAxel Rasmussen if (huge_pte_none_mostly(entry)) {
6119af19487fSAxel Rasmussen if (is_pte_marker(entry)) {
6120af19487fSAxel Rasmussen pte_marker marker =
6121af19487fSAxel Rasmussen pte_marker_get(pte_to_swp_entry(entry));
6122af19487fSAxel Rasmussen
6123af19487fSAxel Rasmussen if (marker & PTE_MARKER_POISONED) {
6124af19487fSAxel Rasmussen ret = VM_FAULT_HWPOISON_LARGE;
6125af19487fSAxel Rasmussen goto out_mutex;
6126af19487fSAxel Rasmussen }
6127af19487fSAxel Rasmussen }
6128af19487fSAxel Rasmussen
6129958f32ceSLiu Shixin /*
6130af19487fSAxel Rasmussen * Other PTE markers should be handled the same way as none PTE.
6131af19487fSAxel Rasmussen *
6132958f32ceSLiu Shixin * hugetlb_no_page will drop vma lock and hugetlb fault
6133958f32ceSLiu Shixin * mutex internally, which make us return immediately.
6134958f32ceSLiu Shixin */
6135958f32ceSLiu Shixin return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
6136c64e912cSPeter Xu entry, flags);
6137af19487fSAxel Rasmussen }
613886e5216fSAdam Litke
613983c54070SNick Piggin ret = 0;
61401e8f889bSDavid Gibson
614157303d80SAndy Whitcroft /*
61420f792cf9SNaoya Horiguchi * entry could be a migration/hwpoison entry at this point, so this
61430f792cf9SNaoya Horiguchi * check prevents the kernel from going below assuming that we have
61447c8de358SEthon Paul * an active hugepage in pagecache. This goto expects the 2nd page
61457c8de358SEthon Paul * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will
61467c8de358SEthon Paul * properly handle it.
61470f792cf9SNaoya Horiguchi */
6148fcd48540SPeter Xu if (!pte_present(entry)) {
6149fcd48540SPeter Xu if (unlikely(is_hugetlb_entry_migration(entry))) {
6150fcd48540SPeter Xu /*
6151fcd48540SPeter Xu * Release the hugetlb fault lock now, but retain
6152fcd48540SPeter Xu * the vma lock, because it is needed to guard the
6153fcd48540SPeter Xu * huge_pte_lockptr() later in
6154fcd48540SPeter Xu * migration_entry_wait_huge(). The vma lock will
6155fcd48540SPeter Xu * be released there.
6156fcd48540SPeter Xu */
6157fcd48540SPeter Xu mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6158fcd48540SPeter Xu migration_entry_wait_huge(vma, ptep);
6159fcd48540SPeter Xu return 0;
6160fcd48540SPeter Xu } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
6161fcd48540SPeter Xu ret = VM_FAULT_HWPOISON_LARGE |
6162fcd48540SPeter Xu VM_FAULT_SET_HINDEX(hstate_index(h));
61630f792cf9SNaoya Horiguchi goto out_mutex;
6164fcd48540SPeter Xu }
61650f792cf9SNaoya Horiguchi
61660f792cf9SNaoya Horiguchi /*
6167c89357e2SDavid Hildenbrand * If we are going to COW/unshare the mapping later, we examine the
6168c89357e2SDavid Hildenbrand * pending reservations for this page now. This will ensure that any
616957303d80SAndy Whitcroft * allocations necessary to record that reservation occur outside the
61701d8d1464SDavid Hildenbrand * spinlock. Also look up the pagecache page now, as it is used to
61711d8d1464SDavid Hildenbrand * determine if a reservation has been consumed.
617257303d80SAndy Whitcroft */
6173c89357e2SDavid Hildenbrand if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
61741d8d1464SDavid Hildenbrand !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) {
6175285b8dcaSHuang Ying if (vma_needs_reservation(h, vma, haddr) < 0) {
61762b26736cSAndy Whitcroft ret = VM_FAULT_OOM;
6177b4d1d99fSDavid Gibson goto out_mutex;
61782b26736cSAndy Whitcroft }
61795e911373SMike Kravetz /* Just decrements count, does not deallocate */
6180285b8dcaSHuang Ying vma_end_reservation(h, vma, haddr);
618157303d80SAndy Whitcroft
6182371607a3SSidhartha Kumar pagecache_folio = filemap_lock_folio(mapping, idx);
618366dabbb6SChristoph Hellwig if (IS_ERR(pagecache_folio))
618466dabbb6SChristoph Hellwig pagecache_folio = NULL;
618557303d80SAndy Whitcroft }
618657303d80SAndy Whitcroft
61870f792cf9SNaoya Horiguchi ptl = huge_pte_lock(h, mm, ptep);
61880fe6e20bSNaoya Horiguchi
6189c89357e2SDavid Hildenbrand /* Check for a racing update before calling hugetlb_wp() */
6190b4d1d99fSDavid Gibson if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
6191cb900f41SKirill A. Shutemov goto out_ptl;
6192b4d1d99fSDavid Gibson
6193166f3eccSPeter Xu /* Handle userfault-wp first, before trying to lock more pages */
6194166f3eccSPeter Xu if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
6195166f3eccSPeter Xu (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
6196166f3eccSPeter Xu struct vm_fault vmf = {
6197166f3eccSPeter Xu .vma = vma,
6198166f3eccSPeter Xu .address = haddr,
6199166f3eccSPeter Xu .real_address = address,
6200166f3eccSPeter Xu .flags = flags,
6201166f3eccSPeter Xu };
6202166f3eccSPeter Xu
6203166f3eccSPeter Xu spin_unlock(ptl);
6204371607a3SSidhartha Kumar if (pagecache_folio) {
6205371607a3SSidhartha Kumar folio_unlock(pagecache_folio);
6206371607a3SSidhartha Kumar folio_put(pagecache_folio);
6207166f3eccSPeter Xu }
620840549ba8SMike Kravetz hugetlb_vma_unlock_read(vma);
6209166f3eccSPeter Xu mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6210166f3eccSPeter Xu return handle_userfault(&vmf, VM_UFFD_WP);
6211166f3eccSPeter Xu }
6212166f3eccSPeter Xu
62130f792cf9SNaoya Horiguchi /*
6214c89357e2SDavid Hildenbrand * hugetlb_wp() requires page locks of pte_page(entry) and
6215371607a3SSidhartha Kumar * pagecache_folio, so here we need to take the former one
6216061e62e8SZhangPeng * when folio != pagecache_folio or !pagecache_folio.
62170f792cf9SNaoya Horiguchi */
6218061e62e8SZhangPeng folio = page_folio(pte_page(entry));
6219061e62e8SZhangPeng if (folio != pagecache_folio)
6220061e62e8SZhangPeng if (!folio_trylock(folio)) {
62210f792cf9SNaoya Horiguchi need_wait_lock = 1;
62220f792cf9SNaoya Horiguchi goto out_ptl;
62230f792cf9SNaoya Horiguchi }
62240f792cf9SNaoya Horiguchi
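/* Hold a folio reference so it cannot be freed while we operate on it below. */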
6225061e62e8SZhangPeng folio_get(folio);
6226b4d1d99fSDavid Gibson
6227c89357e2SDavid Hildenbrand if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
6228106c992aSGerald Schaefer if (!huge_pte_write(entry)) {
6229c89357e2SDavid Hildenbrand ret = hugetlb_wp(mm, vma, address, ptep, flags,
6230371607a3SSidhartha Kumar pagecache_folio, ptl);
62310f792cf9SNaoya Horiguchi goto out_put_page;
6232c89357e2SDavid Hildenbrand } else if (likely(flags & FAULT_FLAG_WRITE)) {
6233106c992aSGerald Schaefer entry = huge_pte_mkdirty(entry);
6234b4d1d99fSDavid Gibson }
6235c89357e2SDavid Hildenbrand }
6236b4d1d99fSDavid Gibson entry = pte_mkyoung(entry);
6237285b8dcaSHuang Ying if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
6238788c7df4SHugh Dickins flags & FAULT_FLAG_WRITE))
6239285b8dcaSHuang Ying update_mmu_cache(vma, haddr, ptep);
62400f792cf9SNaoya Horiguchi out_put_page:
6241061e62e8SZhangPeng if (folio != pagecache_folio)
6242061e62e8SZhangPeng folio_unlock(folio);
6243061e62e8SZhangPeng folio_put(folio);
6244cb900f41SKirill A. Shutemov out_ptl:
6245cb900f41SKirill A. Shutemov spin_unlock(ptl);
624657303d80SAndy Whitcroft
6247371607a3SSidhartha Kumar if (pagecache_folio) {
6248371607a3SSidhartha Kumar folio_unlock(pagecache_folio);
6249371607a3SSidhartha Kumar folio_put(pagecache_folio);
625057303d80SAndy Whitcroft }
6251b4d1d99fSDavid Gibson out_mutex:
625240549ba8SMike Kravetz hugetlb_vma_unlock_read(vma);
6253c672c7f2SMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]);
62540f792cf9SNaoya Horiguchi /*
62550f792cf9SNaoya Horiguchi * Generally it is safe to hold a refcount while waiting for the page
62560f792cf9SNaoya Horiguchi * lock. But here we only wait in order to defer the next page fault
62570f792cf9SNaoya Horiguchi * and avoid a busy loop; the page is not touched after it is unlocked
62580f792cf9SNaoya Horiguchi * before returning from the current fault. So we are safe from
62590f792cf9SNaoya Horiguchi * accessing a freed page even without taking a refcount here.
62600f792cf9SNaoya Horiguchi */
62610f792cf9SNaoya Horiguchi if (need_wait_lock)
6262061e62e8SZhangPeng folio_wait_locked(folio);
62631e8f889bSDavid Gibson return ret;
626486e5216fSAdam Litke }
626586e5216fSAdam Litke
6266714c1891SAxel Rasmussen #ifdef CONFIG_USERFAULTFD
62678fb5debcSMike Kravetz /*
6268a734991cSAxel Rasmussen * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
6269a734991cSAxel Rasmussen * with modifications for hugetlb pages.
62708fb5debcSMike Kravetz */
627161c50040SAxel Rasmussen int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
62728fb5debcSMike Kravetz struct vm_area_struct *dst_vma,
62738fb5debcSMike Kravetz unsigned long dst_addr,
62748fb5debcSMike Kravetz unsigned long src_addr,
6275d9712937SAxel Rasmussen uffd_flags_t flags,
62760169fd51SZhangPeng struct folio **foliop)
62778fb5debcSMike Kravetz {
627861c50040SAxel Rasmussen struct mm_struct *dst_mm = dst_vma->vm_mm;
6279d9712937SAxel Rasmussen bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
6280d9712937SAxel Rasmussen bool wp_enabled = (flags & MFILL_ATOMIC_WP);
62818cc5fcbbSMina Almasry struct hstate *h = hstate_vma(dst_vma);
62828cc5fcbbSMina Almasry struct address_space *mapping = dst_vma->vm_file->f_mapping;
62838cc5fcbbSMina Almasry pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
62841e392147SAndrea Arcangeli unsigned long size;
62851c9e8defSMike Kravetz int vm_shared = dst_vma->vm_flags & VM_SHARED;
62868fb5debcSMike Kravetz pte_t _dst_pte;
62878fb5debcSMike Kravetz spinlock_t *ptl;
62888cc5fcbbSMina Almasry int ret = -ENOMEM;
6289d0ce0e47SSidhartha Kumar struct folio *folio;
6290f6191471SAxel Rasmussen int writable;
6291d0ce0e47SSidhartha Kumar bool folio_in_pagecache = false;
62928fb5debcSMike Kravetz
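/* MFILL_ATOMIC_POISON: install a poison marker instead of mapping a page. */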
62938a13897fSAxel Rasmussen if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
62948a13897fSAxel Rasmussen ptl = huge_pte_lock(h, dst_mm, dst_pte);
62958a13897fSAxel Rasmussen
62968a13897fSAxel Rasmussen /* Don't overwrite any existing PTEs (even markers) */
62978a13897fSAxel Rasmussen if (!huge_pte_none(huge_ptep_get(dst_pte))) {
62988a13897fSAxel Rasmussen spin_unlock(ptl);
62998a13897fSAxel Rasmussen return -EEXIST;
63008a13897fSAxel Rasmussen }
63018a13897fSAxel Rasmussen
63028a13897fSAxel Rasmussen _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
6303935d4f0cSRyan Roberts set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte,
6304935d4f0cSRyan Roberts huge_page_size(h));
63058a13897fSAxel Rasmussen
63068a13897fSAxel Rasmussen /* No need to invalidate - it was non-present before */
63078a13897fSAxel Rasmussen update_mmu_cache(dst_vma, dst_addr, dst_pte);
63088a13897fSAxel Rasmussen
63098a13897fSAxel Rasmussen spin_unlock(ptl);
63108a13897fSAxel Rasmussen return 0;
63118a13897fSAxel Rasmussen }
63128a13897fSAxel Rasmussen
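/* MFILL_ATOMIC_CONTINUE: map the folio that is already in the page cache. */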
6313f6191471SAxel Rasmussen if (is_continue) {
6314f6191471SAxel Rasmussen ret = -EFAULT;
6315d0ce0e47SSidhartha Kumar folio = filemap_lock_folio(mapping, idx);
631666dabbb6SChristoph Hellwig if (IS_ERR(folio))
6317f6191471SAxel Rasmussen goto out;
6318d0ce0e47SSidhartha Kumar folio_in_pagecache = true;
63190169fd51SZhangPeng } else if (!*foliop) {
63200169fd51SZhangPeng /* If a folio already exists in the page cache, then this is
6321d84cf06eSMina Almasry * UFFDIO_COPY for a non-missing case. Return -EEXIST.
6322d84cf06eSMina Almasry */
6323d84cf06eSMina Almasry if (vm_shared &&
6324d84cf06eSMina Almasry hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6325d84cf06eSMina Almasry ret = -EEXIST;
63268fb5debcSMike Kravetz goto out;
6327d84cf06eSMina Almasry }
6328d84cf06eSMina Almasry
6329d0ce0e47SSidhartha Kumar folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
6330d0ce0e47SSidhartha Kumar if (IS_ERR(folio)) {
6331d84cf06eSMina Almasry ret = -ENOMEM;
6332d84cf06eSMina Almasry goto out;
6333d84cf06eSMina Almasry }
63348fb5debcSMike Kravetz
6335e87340caSZhangPeng ret = copy_folio_from_user(folio, (const void __user *) src_addr,
6336e87340caSZhangPeng false);
63378fb5debcSMike Kravetz
6338c1e8d7c6SMichel Lespinasse /* fallback to copy_from_user outside mmap_lock */
63398fb5debcSMike Kravetz if (unlikely(ret)) {
63409e368259SAndrea Arcangeli ret = -ENOENT;
6341d0ce0e47SSidhartha Kumar /* Free the allocated folio which may have
63428cc5fcbbSMina Almasry * consumed a reservation.
63438cc5fcbbSMina Almasry */
6344d2d7bb44SSidhartha Kumar restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6345d0ce0e47SSidhartha Kumar folio_put(folio);
63468cc5fcbbSMina Almasry
6347d0ce0e47SSidhartha Kumar /* Allocate a temporary folio to hold the copied
63488cc5fcbbSMina Almasry * contents.
63498cc5fcbbSMina Almasry */
6350d0ce0e47SSidhartha Kumar folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
6351d0ce0e47SSidhartha Kumar if (!folio) {
63528cc5fcbbSMina Almasry ret = -ENOMEM;
63538cc5fcbbSMina Almasry goto out;
63548cc5fcbbSMina Almasry }
63550169fd51SZhangPeng *foliop = folio;
63560169fd51SZhangPeng /* Set the outparam foliop and return to the caller to
63578cc5fcbbSMina Almasry * copy the contents outside the lock. Don't free the
63580169fd51SZhangPeng * folio.
63598cc5fcbbSMina Almasry */
63608fb5debcSMike Kravetz goto out;
63618fb5debcSMike Kravetz }
63628fb5debcSMike Kravetz } else {
63638cc5fcbbSMina Almasry if (vm_shared &&
63648cc5fcbbSMina Almasry hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
63650169fd51SZhangPeng folio_put(*foliop);
63668cc5fcbbSMina Almasry ret = -EEXIST;
63670169fd51SZhangPeng *foliop = NULL;
63688cc5fcbbSMina Almasry goto out;
63698cc5fcbbSMina Almasry }
63708cc5fcbbSMina Almasry
6371d0ce0e47SSidhartha Kumar folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
6372d0ce0e47SSidhartha Kumar if (IS_ERR(folio)) {
63730169fd51SZhangPeng folio_put(*foliop);
63748cc5fcbbSMina Almasry ret = -ENOMEM;
63750169fd51SZhangPeng *foliop = NULL;
63768cc5fcbbSMina Almasry goto out;
63778cc5fcbbSMina Almasry }
63781cb9dc4bSLiu Shixin ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
63790169fd51SZhangPeng folio_put(*foliop);
63800169fd51SZhangPeng *foliop = NULL;
63811cb9dc4bSLiu Shixin if (ret) {
63821cb9dc4bSLiu Shixin folio_put(folio);
63831cb9dc4bSLiu Shixin goto out;
63841cb9dc4bSLiu Shixin }
63858fb5debcSMike Kravetz }
63868fb5debcSMike Kravetz
63878fb5debcSMike Kravetz /*
6388d0ce0e47SSidhartha Kumar * The memory barrier inside __folio_mark_uptodate makes sure that
63898fb5debcSMike Kravetz * preceding stores to the page contents become visible before
63908fb5debcSMike Kravetz * the set_pte_at() write.
63918fb5debcSMike Kravetz */
6392d0ce0e47SSidhartha Kumar __folio_mark_uptodate(folio);
63938fb5debcSMike Kravetz
6394f6191471SAxel Rasmussen /* Add shared, newly allocated pages to the page cache. */
6395f6191471SAxel Rasmussen if (vm_shared && !is_continue) {
63961e392147SAndrea Arcangeli size = i_size_read(mapping->host) >> huge_page_shift(h);
63971e392147SAndrea Arcangeli ret = -EFAULT;
63981e392147SAndrea Arcangeli if (idx >= size)
63991e392147SAndrea Arcangeli goto out_release_nounlock;
64001c9e8defSMike Kravetz
64011e392147SAndrea Arcangeli /*
64021e392147SAndrea Arcangeli * Serialization between remove_inode_hugepages() and
64037e1813d4SMike Kravetz * hugetlb_add_to_page_cache() below happens through the
64041e392147SAndrea Arcangeli * hugetlb_fault_mutex_table, which here must be held by
64051e392147SAndrea Arcangeli * the caller.
64061e392147SAndrea Arcangeli */
64079b91c0e2SSidhartha Kumar ret = hugetlb_add_to_page_cache(folio, mapping, idx);
64081c9e8defSMike Kravetz if (ret)
64091c9e8defSMike Kravetz goto out_release_nounlock;
6410d0ce0e47SSidhartha Kumar folio_in_pagecache = true;
64111c9e8defSMike Kravetz }
64121c9e8defSMike Kravetz
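/* With the folio ready, take the destination pte lock and recheck the entry. */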
6413bcc66543SMiaohe Lin ptl = huge_pte_lock(h, dst_mm, dst_pte);
64148fb5debcSMike Kravetz
64158625147cSJames Houghton ret = -EIO;
6416d0ce0e47SSidhartha Kumar if (folio_test_hwpoison(folio))
64178625147cSJames Houghton goto out_release_unlock;
64188625147cSJames Houghton
64191e392147SAndrea Arcangeli /*
64206041c691SPeter Xu * We allow overwriting a pte marker: consider the case where both
64216041c691SPeter Xu * MISSING|WP are registered; we first wr-protect a none pte which has
64226041c691SPeter Xu * no page cache page backing it, then access the page.
64236041c691SPeter Xu */
6424fa27759aSMike Kravetz ret = -EEXIST;
64256041c691SPeter Xu if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
64268fb5debcSMike Kravetz goto out_release_unlock;
64278fb5debcSMike Kravetz
6428d0ce0e47SSidhartha Kumar if (folio_in_pagecache)
6429d0ce0e47SSidhartha Kumar page_dup_file_rmap(&folio->page, true);
64304781593dSPeter Xu else
6431d0ce0e47SSidhartha Kumar hugepage_add_new_anon_rmap(folio, dst_vma, dst_addr);
64328fb5debcSMike Kravetz
64336041c691SPeter Xu /*
64346041c691SPeter Xu * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
64356041c691SPeter Xu * with wp flag set, don't set pte write bit.
64366041c691SPeter Xu */
6437d9712937SAxel Rasmussen if (wp_enabled || (is_continue && !vm_shared))
6438f6191471SAxel Rasmussen writable = 0;
6439f6191471SAxel Rasmussen else
6440f6191471SAxel Rasmussen writable = dst_vma->vm_flags & VM_WRITE;
6441f6191471SAxel Rasmussen
6442d0ce0e47SSidhartha Kumar _dst_pte = make_huge_pte(dst_vma, &folio->page, writable);
64436041c691SPeter Xu /*
64446041c691SPeter Xu * Always mark the UFFDIO_COPY page dirty; note that this may not be
64456041c691SPeter Xu * terribly important for hugetlbfs for now since swapping is not
64466041c691SPeter Xu * supported, but we should still be clear that this page cannot be
64476041c691SPeter Xu * thrown away at will, even if the write bit is not set.
64486041c691SPeter Xu */
64498fb5debcSMike Kravetz _dst_pte = huge_pte_mkdirty(_dst_pte);
64508fb5debcSMike Kravetz _dst_pte = pte_mkyoung(_dst_pte);
64518fb5debcSMike Kravetz
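/* Preserve userfaultfd write protection when requested. */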
6452d9712937SAxel Rasmussen if (wp_enabled)
64536041c691SPeter Xu _dst_pte = huge_pte_mkuffd_wp(_dst_pte);
64546041c691SPeter Xu
6455935d4f0cSRyan Roberts set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));
64568fb5debcSMike Kravetz
64578fb5debcSMike Kravetz hugetlb_count_add(pages_per_huge_page(h), dst_mm);
64588fb5debcSMike Kravetz
64598fb5debcSMike Kravetz /* No need to invalidate - it was non-present before */
64608fb5debcSMike Kravetz update_mmu_cache(dst_vma, dst_addr, dst_pte);
64618fb5debcSMike Kravetz
64628fb5debcSMike Kravetz spin_unlock(ptl);
6463f6191471SAxel Rasmussen if (!is_continue)
6464d0ce0e47SSidhartha Kumar folio_set_hugetlb_migratable(folio);
6465f6191471SAxel Rasmussen if (vm_shared || is_continue)
6466d0ce0e47SSidhartha Kumar folio_unlock(folio);
64678fb5debcSMike Kravetz ret = 0;
64688fb5debcSMike Kravetz out:
64698fb5debcSMike Kravetz return ret;
64708fb5debcSMike Kravetz out_release_unlock:
64718fb5debcSMike Kravetz spin_unlock(ptl);
6472f6191471SAxel Rasmussen if (vm_shared || is_continue)
6473d0ce0e47SSidhartha Kumar folio_unlock(folio);
64745af10dfdSAndrea Arcangeli out_release_nounlock:
6475d0ce0e47SSidhartha Kumar if (!folio_in_pagecache)
6476d2d7bb44SSidhartha Kumar restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6477d0ce0e47SSidhartha Kumar folio_put(folio);
64788fb5debcSMike Kravetz goto out;
64798fb5debcSMike Kravetz }
6480714c1891SAxel Rasmussen #endif /* CONFIG_USERFAULTFD */
64818fb5debcSMike Kravetz
648257a196a5SMike Kravetz struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
64835502ea44SPeter Xu unsigned long address, unsigned int flags,
64845502ea44SPeter Xu unsigned int *page_mask)
648557a196a5SMike Kravetz {
648657a196a5SMike Kravetz struct hstate *h = hstate_vma(vma);
648757a196a5SMike Kravetz struct mm_struct *mm = vma->vm_mm;
648857a196a5SMike Kravetz unsigned long haddr = address & huge_page_mask(h);
648957a196a5SMike Kravetz struct page *page = NULL;
649057a196a5SMike Kravetz spinlock_t *ptl;
649157a196a5SMike Kravetz pte_t *pte, entry;
6492458568c9SPeter Xu int ret;
649357a196a5SMike Kravetz
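/* The vma lock keeps huge_pmd_unshare() from freeing the page table under us. */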
64947d049f3aSPeter Xu hugetlb_vma_lock_read(vma);
64959c67a207SPeter Xu pte = hugetlb_walk(vma, haddr, huge_page_size(h));
649657a196a5SMike Kravetz if (!pte)
64977d049f3aSPeter Xu goto out_unlock;
649857a196a5SMike Kravetz
649957a196a5SMike Kravetz ptl = huge_pte_lock(h, mm, pte);
650057a196a5SMike Kravetz entry = huge_ptep_get(pte);
650157a196a5SMike Kravetz if (pte_present(entry)) {
6502458568c9SPeter Xu page = pte_page(entry);
6503458568c9SPeter Xu
6504458568c9SPeter Xu if (!huge_pte_write(entry)) {
6505458568c9SPeter Xu if (flags & FOLL_WRITE) {
6506458568c9SPeter Xu page = NULL;
6507458568c9SPeter Xu goto out;
6508458568c9SPeter Xu }
6509458568c9SPeter Xu
6510458568c9SPeter Xu if (gup_must_unshare(vma, flags, page)) {
6511458568c9SPeter Xu /* Tell the caller to do unsharing */
6512458568c9SPeter Xu page = ERR_PTR(-EMLINK);
6513458568c9SPeter Xu goto out;
6514458568c9SPeter Xu }
6515458568c9SPeter Xu }
6516458568c9SPeter Xu
651718576d12SZi Yan page = nth_page(page, ((address & ~huge_page_mask(h)) >> PAGE_SHIFT));
6518458568c9SPeter Xu
651957a196a5SMike Kravetz /*
652057a196a5SMike Kravetz * Note that page may be a sub-page, and with vmemmap
652157a196a5SMike Kravetz * optimizations the page struct may be read only.
652257a196a5SMike Kravetz * try_grab_page() will increase the ref count on the
652357a196a5SMike Kravetz * head page, so this will be OK.
652457a196a5SMike Kravetz *
6525e2ca6ba6SLinus Torvalds * try_grab_page() should always be able to get the page here,
6526e2ca6ba6SLinus Torvalds * because we hold the ptl lock and have verified pte_present().
652757a196a5SMike Kravetz */
6528458568c9SPeter Xu ret = try_grab_page(page, flags);
6529458568c9SPeter Xu
6530458568c9SPeter Xu if (WARN_ON_ONCE(ret)) {
6531458568c9SPeter Xu page = ERR_PTR(ret);
653257a196a5SMike Kravetz goto out;
653357a196a5SMike Kravetz }
65345502ea44SPeter Xu
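/* Report the extent of the huge page to the caller as a page mask. */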
65355502ea44SPeter Xu *page_mask = (1U << huge_page_order(h)) - 1;
653657a196a5SMike Kravetz }
653757a196a5SMike Kravetz out:
653857a196a5SMike Kravetz spin_unlock(ptl);
65397d049f3aSPeter Xu out_unlock:
65407d049f3aSPeter Xu hugetlb_vma_unlock_read(vma);
6541dd767aaaSPeter Xu
6542dd767aaaSPeter Xu /*
6543dd767aaaSPeter Xu * Fix up the return value for dump requests: if the page cache page
6544dd767aaaSPeter Xu * doesn't exist, don't try to allocate a new page but just skip it.
6545dd767aaaSPeter Xu */
6546dd767aaaSPeter Xu if (!page && (flags & FOLL_DUMP) &&
6547dd767aaaSPeter Xu !hugetlbfs_pagecache_present(h, vma, address))
6548dd767aaaSPeter Xu page = ERR_PTR(-EFAULT);
6549dd767aaaSPeter Xu
655057a196a5SMike Kravetz return page;
655157a196a5SMike Kravetz }
655257a196a5SMike Kravetz
6553a79390f5SPeter Xu long hugetlb_change_protection(struct vm_area_struct *vma,
65545a90d5a1SPeter Xu unsigned long address, unsigned long end,
65555a90d5a1SPeter Xu pgprot_t newprot, unsigned long cp_flags)
65568f860591SZhang, Yanmin {
65578f860591SZhang, Yanmin struct mm_struct *mm = vma->vm_mm;
65588f860591SZhang, Yanmin unsigned long start = address;
65598f860591SZhang, Yanmin pte_t *ptep;
65608f860591SZhang, Yanmin pte_t pte;
6561a5516438SAndi Kleen struct hstate *h = hstate_vma(vma);
6562a79390f5SPeter Xu long pages = 0, psize = huge_page_size(h);
6563dff11abeSMike Kravetz bool shared_pmd = false;
6564ac46d4f3SJérôme Glisse struct mmu_notifier_range range;
6565e95a9851SMike Kravetz unsigned long last_addr_mask;
65665a90d5a1SPeter Xu bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
65675a90d5a1SPeter Xu bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6568dff11abeSMike Kravetz
6569dff11abeSMike Kravetz /*
6570dff11abeSMike Kravetz * In the case of shared PMDs, the area to flush could be beyond
6571ac46d4f3SJérôme Glisse * start/end. Set range.start/range.end to cover the maximum possible
6572dff11abeSMike Kravetz * range if PMD sharing is possible.
6573dff11abeSMike Kravetz */
65747269f999SJérôme Glisse mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
65757d4a8be0SAlistair Popple 0, mm, start, end);
6576ac46d4f3SJérôme Glisse adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
65778f860591SZhang, Yanmin
65788f860591SZhang, Yanmin BUG_ON(address >= end);
6579ac46d4f3SJérôme Glisse flush_cache_range(vma, range.start, range.end);
65808f860591SZhang, Yanmin
6581ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_start(&range);
658240549ba8SMike Kravetz hugetlb_vma_lock_write(vma);
658383cde9e8SDavidlohr Bueso i_mmap_lock_write(vma->vm_file->f_mapping);
658440549ba8SMike Kravetz last_addr_mask = hugetlb_mask_last_page(h);
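/* Walk the range one huge page at a time; last_addr_mask skips ranges with no page table. */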
658560dfaad6SPeter Xu for (; address < end; address += psize) {
6586cb900f41SKirill A. Shutemov spinlock_t *ptl;
65879c67a207SPeter Xu ptep = hugetlb_walk(vma, address, psize);
6588e95a9851SMike Kravetz if (!ptep) {
6589fed15f13SPeter Xu if (!uffd_wp) {
6590e95a9851SMike Kravetz address |= last_addr_mask;
65918f860591SZhang, Yanmin continue;
6592e95a9851SMike Kravetz }
6593fed15f13SPeter Xu /*
6594fed15f13SPeter Xu * Userfaultfd wr-protect requires pgtable
6595fed15f13SPeter Xu * pre-allocations to install pte markers.
6596fed15f13SPeter Xu */
6597fed15f13SPeter Xu ptep = huge_pte_alloc(mm, vma, address, psize);
6598d1751118SPeter Xu if (!ptep) {
6599d1751118SPeter Xu pages = -ENOMEM;
6600fed15f13SPeter Xu break;
6601fed15f13SPeter Xu }
6602d1751118SPeter Xu }
6603cb900f41SKirill A. Shutemov ptl = huge_pte_lock(h, mm, ptep);
66044ddb4d91SMike Kravetz if (huge_pmd_unshare(mm, vma, address, ptep)) {
660560dfaad6SPeter Xu /*
660660dfaad6SPeter Xu * When uffd-wp is enabled on the vma, unshare
660760dfaad6SPeter Xu * shouldn't happen at all. Warn if it does
660860dfaad6SPeter Xu * happen for any reason.
660960dfaad6SPeter Xu */
661060dfaad6SPeter Xu WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
66117da4d641SPeter Zijlstra pages++;
6612cb900f41SKirill A. Shutemov spin_unlock(ptl);
6613dff11abeSMike Kravetz shared_pmd = true;
66144ddb4d91SMike Kravetz address |= last_addr_mask;
661539dde65cSChen, Kenneth W continue;
66167da4d641SPeter Zijlstra }
6617a8bda28dSNaoya Horiguchi pte = huge_ptep_get(ptep);
6618a8bda28dSNaoya Horiguchi if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
66190e678153SDavid Hildenbrand /* Nothing to do. */
66200e678153SDavid Hildenbrand } else if (unlikely(is_hugetlb_entry_migration(pte))) {
6621a8bda28dSNaoya Horiguchi swp_entry_t entry = pte_to_swp_entry(pte);
66226c287605SDavid Hildenbrand struct page *page = pfn_swap_entry_to_page(entry);
662344f86392SDavid Hildenbrand pte_t newpte = pte;
6624a8bda28dSNaoya Horiguchi
662544f86392SDavid Hildenbrand if (is_writable_migration_entry(entry)) {
66266c287605SDavid Hildenbrand if (PageAnon(page))
66276c287605SDavid Hildenbrand entry = make_readable_exclusive_migration_entry(
66286c287605SDavid Hildenbrand swp_offset(entry));
66296c287605SDavid Hildenbrand else
66304dd845b5SAlistair Popple entry = make_readable_migration_entry(
66314dd845b5SAlistair Popple swp_offset(entry));
6632a8bda28dSNaoya Horiguchi newpte = swp_entry_to_pte(entry);
663344f86392SDavid Hildenbrand pages++;
663444f86392SDavid Hildenbrand }
663544f86392SDavid Hildenbrand
66365a90d5a1SPeter Xu if (uffd_wp)
66375a90d5a1SPeter Xu newpte = pte_swp_mkuffd_wp(newpte);
66385a90d5a1SPeter Xu else if (uffd_wp_resolve)
66395a90d5a1SPeter Xu newpte = pte_swp_clear_uffd_wp(newpte);
664044f86392SDavid Hildenbrand if (!pte_same(pte, newpte))
6641935d4f0cSRyan Roberts set_huge_pte_at(mm, address, ptep, newpte, psize);
66420e678153SDavid Hildenbrand } else if (unlikely(is_pte_marker(pte))) {
6643db01bfbdSPeter Xu /*
6644db01bfbdSPeter Xu * Do nothing on a poison marker; the page is
6645db01bfbdSPeter Xu * corrupted, so permissions do not apply. Here
6646db01bfbdSPeter Xu * pte_marker_uffd_wp()==true implies !poison
6647db01bfbdSPeter Xu * because they are mutually exclusive.
6648db01bfbdSPeter Xu */
6649db01bfbdSPeter Xu if (pte_marker_uffd_wp(pte) && uffd_wp_resolve)
66500e678153SDavid Hildenbrand /* Safe to modify directly (non-present->none). */
665160dfaad6SPeter Xu huge_pte_clear(mm, address, ptep, psize);
66520e678153SDavid Hildenbrand } else if (!huge_pte_none(pte)) {
6653023bdd00SAneesh Kumar K.V pte_t old_pte;
665479c1c594SChristophe Leroy unsigned int shift = huge_page_shift(hstate_vma(vma));
6655023bdd00SAneesh Kumar K.V
6656023bdd00SAneesh Kumar K.V old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
665716785bd7SAnshuman Khandual pte = huge_pte_modify(old_pte, newprot);
665879c1c594SChristophe Leroy pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
66595a90d5a1SPeter Xu if (uffd_wp)
6660f1eb1bacSPeter Xu pte = huge_pte_mkuffd_wp(pte);
66615a90d5a1SPeter Xu else if (uffd_wp_resolve)
66625a90d5a1SPeter Xu pte = huge_pte_clear_uffd_wp(pte);
6663023bdd00SAneesh Kumar K.V huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
66647da4d641SPeter Zijlstra pages++;
666560dfaad6SPeter Xu } else {
666660dfaad6SPeter Xu /* None pte */
666760dfaad6SPeter Xu if (unlikely(uffd_wp))
666860dfaad6SPeter Xu /* Safe to modify directly (none->non-present). */
666960dfaad6SPeter Xu set_huge_pte_at(mm, address, ptep,
6670935d4f0cSRyan Roberts make_pte_marker(PTE_MARKER_UFFD_WP),
6671935d4f0cSRyan Roberts psize);
66728f860591SZhang, Yanmin }
6673cb900f41SKirill A. Shutemov spin_unlock(ptl);
66748f860591SZhang, Yanmin }
6675d833352aSMel Gorman /*
6676c8c06efaSDavidlohr Bueso * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
6677d833352aSMel Gorman * may have cleared our pud entry and done put_page on the page table:
6678c8c06efaSDavidlohr Bueso * once we release i_mmap_rwsem, another task can do the final put_page
6679dff11abeSMike Kravetz * and the page table can then be reused and filled with junk. If we actually
6680dff11abeSMike Kravetz * did unshare a page of pmds, flush the range corresponding to the pud.
6681d833352aSMel Gorman */
6682dff11abeSMike Kravetz if (shared_pmd)
6683ac46d4f3SJérôme Glisse flush_hugetlb_tlb_range(vma, range.start, range.end);
6684dff11abeSMike Kravetz else
66855491ae7bSAneesh Kumar K.V flush_hugetlb_tlb_range(vma, start, end);
66860f10851eSJérôme Glisse /*
66871af5a810SAlistair Popple * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() since we
66881af5a810SAlistair Popple * are only downgrading page table protection, not changing it to point
66891af5a810SAlistair Popple * to a new page.
66900f10851eSJérôme Glisse *
6691ee65728eSMike Rapoport * See Documentation/mm/mmu_notifier.rst
66920f10851eSJérôme Glisse */
669383cde9e8SDavidlohr Bueso i_mmap_unlock_write(vma->vm_file->f_mapping);
669440549ba8SMike Kravetz hugetlb_vma_unlock_write(vma);
6695ac46d4f3SJérôme Glisse mmu_notifier_invalidate_range_end(&range);
66967da4d641SPeter Zijlstra
6697d1751118SPeter Xu return pages > 0 ? (pages << h->order) : pages;
66988f860591SZhang, Yanmin }
66998f860591SZhang, Yanmin
670033b8f84aSMike Kravetz /* Return true if reservation was successful, false otherwise. */
670133b8f84aSMike Kravetz bool hugetlb_reserve_pages(struct inode *inode,
6702a1e78772SMel Gorman long from, long to,
67035a6fe125SMel Gorman struct vm_area_struct *vma,
6704ca16d140SKOSAKI Motohiro vm_flags_t vm_flags)
6705e4e574b7SAdam Litke {
6706c5094ec7SMike Kravetz long chg = -1, add = -1;
6707a5516438SAndi Kleen struct hstate *h = hstate_inode(inode);
670890481622SDavid Gibson struct hugepage_subpool *spool = subpool_inode(inode);
67099119a41eSJoonsoo Kim struct resv_map *resv_map;
6710075a61d0SMina Almasry struct hugetlb_cgroup *h_cg = NULL;
67110db9d74eSMina Almasry long gbl_reserve, regions_needed = 0;
6712e4e574b7SAdam Litke
671363489f8eSMike Kravetz /* This should never happen */
671463489f8eSMike Kravetz if (from > to) {
671563489f8eSMike Kravetz VM_WARN(1, "%s called with a negative range\n", __func__);
671633b8f84aSMike Kravetz return false;
671763489f8eSMike Kravetz }
671863489f8eSMike Kravetz
6719a1e78772SMel Gorman /*
6720e700898fSMike Kravetz * vma specific semaphore used for pmd sharing and fault/truncation
6721e700898fSMike Kravetz * synchronization
67228d9bfb26SMike Kravetz */
67238d9bfb26SMike Kravetz hugetlb_vma_lock_alloc(vma);
67248d9bfb26SMike Kravetz
67258d9bfb26SMike Kravetz /*
672617c9d12eSMel Gorman * Only apply hugepage reservation if asked. At fault time, an
672717c9d12eSMel Gorman * attempt will be made for VM_NORESERVE to allocate a page
672890481622SDavid Gibson * without using reserves
672917c9d12eSMel Gorman */
6730ca16d140SKOSAKI Motohiro if (vm_flags & VM_NORESERVE)
673133b8f84aSMike Kravetz return true;
673217c9d12eSMel Gorman
673317c9d12eSMel Gorman /*
6734a1e78772SMel Gorman * Shared mappings base their reservation on the number of pages that
6735a1e78772SMel Gorman * are already allocated on behalf of the file. Private mappings need
6736a1e78772SMel Gorman * to reserve the full area even if read-only as mprotect() may be
6737a1e78772SMel Gorman * called to make the mapping read-write. Assume !vma is a shm mapping
6738a1e78772SMel Gorman */
67399119a41eSJoonsoo Kim if (!vma || vma->vm_flags & VM_MAYSHARE) {
6740f27a5136SMike Kravetz /*
6741f27a5136SMike Kravetz * resv_map can not be NULL as hugetlb_reserve_pages is only
6742f27a5136SMike Kravetz * called for inodes for which resv_maps were created (see
6743f27a5136SMike Kravetz * hugetlbfs_get_inode).
6744f27a5136SMike Kravetz */
67454e35f483SJoonsoo Kim resv_map = inode_resv_map(inode);
67469119a41eSJoonsoo Kim
67470db9d74eSMina Almasry chg = region_chg(resv_map, from, to, &regions_needed);
67489119a41eSJoonsoo Kim } else {
6749e9fe92aeSMina Almasry /* Private mapping. */
67509119a41eSJoonsoo Kim resv_map = resv_map_alloc();
67515a6fe125SMel Gorman if (!resv_map)
67528d9bfb26SMike Kravetz goto out_err;
67535a6fe125SMel Gorman
675417c9d12eSMel Gorman chg = to - from;
675517c9d12eSMel Gorman
67565a6fe125SMel Gorman set_vma_resv_map(vma, resv_map);
67575a6fe125SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
67585a6fe125SMel Gorman }
67595a6fe125SMel Gorman
676033b8f84aSMike Kravetz if (chg < 0)
6761c50ac050SDave Hansen goto out_err;
676217c9d12eSMel Gorman
676333b8f84aSMike Kravetz if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
676433b8f84aSMike Kravetz chg * pages_per_huge_page(h), &h_cg) < 0)
6765075a61d0SMina Almasry goto out_err;
6766075a61d0SMina Almasry
6767075a61d0SMina Almasry if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
6768075a61d0SMina Almasry /* For private mappings, the hugetlb_cgroup uncharge info hangs
6769075a61d0SMina Almasry * off the resv_map.
6770075a61d0SMina Almasry */
6771075a61d0SMina Almasry resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6772075a61d0SMina Almasry }
6773075a61d0SMina Almasry
67741c5ecae3SMike Kravetz /*
67751c5ecae3SMike Kravetz * There must be enough pages in the subpool for the mapping. If
67761c5ecae3SMike Kravetz * the subpool has a minimum size, there may be some global
67771c5ecae3SMike Kravetz * reservations already in place (gbl_reserve).
67781c5ecae3SMike Kravetz */
67791c5ecae3SMike Kravetz gbl_reserve = hugepage_subpool_get_pages(spool, chg);
678033b8f84aSMike Kravetz if (gbl_reserve < 0)
6781075a61d0SMina Almasry goto out_uncharge_cgroup;
678217c9d12eSMel Gorman
678317c9d12eSMel Gorman /*
678417c9d12eSMel Gorman * Check that enough hugepages are available for the reservation.
678590481622SDavid Gibson * Hand the pages back to the subpool if there are not enough.
678617c9d12eSMel Gorman */
678733b8f84aSMike Kravetz if (hugetlb_acct_memory(h, gbl_reserve) < 0)
6788075a61d0SMina Almasry goto out_put_pages;
678917c9d12eSMel Gorman
679017c9d12eSMel Gorman /*
679117c9d12eSMel Gorman * Account for the reservations made. Shared mappings record regions
679217c9d12eSMel Gorman * that have reservations as they are shared by multiple VMAs.
679317c9d12eSMel Gorman * When the last VMA disappears, the region map says how much
679417c9d12eSMel Gorman * the reservation was and the page cache tells how much of
679517c9d12eSMel Gorman * the reservation was consumed. Private mappings are per-VMA and
679617c9d12eSMel Gorman * only the consumed reservations are tracked. When the VMA
679717c9d12eSMel Gorman * disappears, the original reservation is the VMA size and the
679817c9d12eSMel Gorman * consumed reservations are stored in the map. Hence, nothing
679917c9d12eSMel Gorman * else has to be done for private mappings here
680017c9d12eSMel Gorman */
680133039678SMike Kravetz if (!vma || vma->vm_flags & VM_MAYSHARE) {
6802075a61d0SMina Almasry add = region_add(resv_map, from, to, regions_needed, h, h_cg);
680333039678SMike Kravetz
68040db9d74eSMina Almasry if (unlikely(add < 0)) {
68050db9d74eSMina Almasry hugetlb_acct_memory(h, -gbl_reserve);
6806075a61d0SMina Almasry goto out_put_pages;
68070db9d74eSMina Almasry } else if (unlikely(chg > add)) {
680833039678SMike Kravetz /*
680933039678SMike Kravetz * pages in this range were added to the reserve
681033039678SMike Kravetz * map between region_chg and region_add. This
6811d0ce0e47SSidhartha Kumar * indicates a race with alloc_hugetlb_folio. Adjust
681233039678SMike Kravetz * the subpool and reserve counts modified above
681333039678SMike Kravetz * based on the difference.
681433039678SMike Kravetz */
681533039678SMike Kravetz long rsv_adjust;
681633039678SMike Kravetz
6817d85aecf2SMiaohe Lin /*
6818d85aecf2SMiaohe Lin * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
6819d85aecf2SMiaohe Lin * reference to h_cg->css. See comment below for detail.
6820d85aecf2SMiaohe Lin */
6821075a61d0SMina Almasry hugetlb_cgroup_uncharge_cgroup_rsvd(
6822075a61d0SMina Almasry hstate_index(h),
6823075a61d0SMina Almasry (chg - add) * pages_per_huge_page(h), h_cg);
6824075a61d0SMina Almasry
682533039678SMike Kravetz rsv_adjust = hugepage_subpool_put_pages(spool,
682633039678SMike Kravetz chg - add);
682733039678SMike Kravetz hugetlb_acct_memory(h, -rsv_adjust);
6828d85aecf2SMiaohe Lin } else if (h_cg) {
6829d85aecf2SMiaohe Lin /*
6830d85aecf2SMiaohe Lin * The file_regions will hold their own reference to
6831d85aecf2SMiaohe Lin * h_cg->css. So we should release the reference held
6832d85aecf2SMiaohe Lin * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
6833d85aecf2SMiaohe Lin * done.
6834d85aecf2SMiaohe Lin */
6835d85aecf2SMiaohe Lin hugetlb_cgroup_put_rsvd_cgroup(h_cg);
683633039678SMike Kravetz }
683733039678SMike Kravetz }
683833b8f84aSMike Kravetz return true;
683933b8f84aSMike Kravetz
6840075a61d0SMina Almasry out_put_pages:
6841075a61d0SMina Almasry /* put back original number of pages, chg */
6842075a61d0SMina Almasry (void)hugepage_subpool_put_pages(spool, chg);
6843075a61d0SMina Almasry out_uncharge_cgroup:
6844075a61d0SMina Almasry hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
6845075a61d0SMina Almasry chg * pages_per_huge_page(h), h_cg);
6846c50ac050SDave Hansen out_err:
68478d9bfb26SMike Kravetz hugetlb_vma_lock_free(vma);
68485e911373SMike Kravetz if (!vma || vma->vm_flags & VM_MAYSHARE)
68490db9d74eSMina Almasry /* Only call region_abort if the region_chg succeeded but the
68500db9d74eSMina Almasry * region_add failed or didn't run.
68510db9d74eSMina Almasry */
68520db9d74eSMina Almasry if (chg >= 0 && add < 0)
68530db9d74eSMina Almasry region_abort(resv_map, from, to, regions_needed);
685492fe9dcbSRik van Riel if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
6855f031dd27SJoonsoo Kim kref_put(&resv_map->refs, resv_map_release);
685692fe9dcbSRik van Riel set_vma_resv_map(vma, NULL);
685792fe9dcbSRik van Riel }
685833b8f84aSMike Kravetz return false;
6859a43a8c39SChen, Kenneth W }
6860a43a8c39SChen, Kenneth W
6861b5cec28dSMike Kravetz long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
6862b5cec28dSMike Kravetz long freed)
6863a43a8c39SChen, Kenneth W {
6864a5516438SAndi Kleen struct hstate *h = hstate_inode(inode);
68654e35f483SJoonsoo Kim struct resv_map *resv_map = inode_resv_map(inode);
68669119a41eSJoonsoo Kim long chg = 0;
686790481622SDavid Gibson struct hugepage_subpool *spool = subpool_inode(inode);
68681c5ecae3SMike Kravetz long gbl_reserve;
686945c682a6SKen Chen
6870f27a5136SMike Kravetz /*
6871f27a5136SMike Kravetz * Since this routine can be called in the evict inode path for all
6872f27a5136SMike Kravetz * hugetlbfs inodes, resv_map could be NULL.
6873f27a5136SMike Kravetz */
6874b5cec28dSMike Kravetz if (resv_map) {
6875b5cec28dSMike Kravetz chg = region_del(resv_map, start, end);
6876b5cec28dSMike Kravetz /*
6877b5cec28dSMike Kravetz * region_del() can fail in the rare case where a region
6878b5cec28dSMike Kravetz * must be split and another region descriptor can not be
6879b5cec28dSMike Kravetz * allocated. If end == LONG_MAX, it will not fail.
6880b5cec28dSMike Kravetz */
6881b5cec28dSMike Kravetz if (chg < 0)
6882b5cec28dSMike Kravetz return chg;
6883b5cec28dSMike Kravetz }
6884b5cec28dSMike Kravetz
688545c682a6SKen Chen spin_lock(&inode->i_lock);
6886e4c6f8beSEric Sandeen inode->i_blocks -= (blocks_per_huge_page(h) * freed);
688745c682a6SKen Chen spin_unlock(&inode->i_lock);
688845c682a6SKen Chen
68891c5ecae3SMike Kravetz /*
68901c5ecae3SMike Kravetz * If the subpool has a minimum size, the number of global
68911c5ecae3SMike Kravetz * reservations to be released may be adjusted.
6892dddf31a4SMiaohe Lin *
6893dddf31a4SMiaohe Lin * Note that !resv_map implies freed == 0. So (chg - freed)
6894dddf31a4SMiaohe Lin * won't go negative.
68951c5ecae3SMike Kravetz */
68961c5ecae3SMike Kravetz gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
68971c5ecae3SMike Kravetz hugetlb_acct_memory(h, -gbl_reserve);
6898b5cec28dSMike Kravetz
6899b5cec28dSMike Kravetz return 0;
6900a43a8c39SChen, Kenneth W }
690193f70f90SNaoya Horiguchi
69023212b535SSteve Capper #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
69033212b535SSteve Capper static unsigned long page_table_shareable(struct vm_area_struct *svma,
69043212b535SSteve Capper struct vm_area_struct *vma,
69053212b535SSteve Capper unsigned long addr, pgoff_t idx)
69063212b535SSteve Capper {
69073212b535SSteve Capper unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
69083212b535SSteve Capper svma->vm_start;
69093212b535SSteve Capper unsigned long sbase = saddr & PUD_MASK;
69103212b535SSteve Capper unsigned long s_end = sbase + PUD_SIZE;
69113212b535SSteve Capper
69123212b535SSteve Capper /* Allow segments to share if only one is marked locked */
6913e430a95aSSuren Baghdasaryan unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
6914e430a95aSSuren Baghdasaryan unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;
69153212b535SSteve Capper
69163212b535SSteve Capper /*
69173212b535SSteve Capper * match the virtual addresses, permissions and the alignment of the
69183212b535SSteve Capper * page table page.
6919131a79b4SMike Kravetz *
6920131a79b4SMike Kravetz * Also, vma_lock (vm_private_data) is required for sharing.
69213212b535SSteve Capper */
69223212b535SSteve Capper if (pmd_index(addr) != pmd_index(saddr) ||
69233212b535SSteve Capper vm_flags != svm_flags ||
6924131a79b4SMike Kravetz !range_in_vma(svma, sbase, s_end) ||
6925131a79b4SMike Kravetz !svma->vm_private_data)
69263212b535SSteve Capper return 0;
69273212b535SSteve Capper
69283212b535SSteve Capper return saddr;
69293212b535SSteve Capper }
69303212b535SSteve Capper
6931bbff39ccSMike Kravetz bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
69323212b535SSteve Capper {
6933bbff39ccSMike Kravetz unsigned long start = addr & PUD_MASK;
6934bbff39ccSMike Kravetz unsigned long end = start + PUD_SIZE;
6935bbff39ccSMike Kravetz
69368d9bfb26SMike Kravetz #ifdef CONFIG_USERFAULTFD
69378d9bfb26SMike Kravetz if (uffd_disable_huge_pmd_share(vma))
69388d9bfb26SMike Kravetz return false;
69398d9bfb26SMike Kravetz #endif
69403212b535SSteve Capper /*
69413212b535SSteve Capper * check on proper vm_flags and page table alignment
69423212b535SSteve Capper */
69438d9bfb26SMike Kravetz if (!(vma->vm_flags & VM_MAYSHARE))
694431aafb45SNicholas Krause return false;
6945bbff39ccSMike Kravetz if (!vma->vm_private_data) /* vma lock required for sharing */
69468d9bfb26SMike Kravetz return false;
69478d9bfb26SMike Kravetz if (!range_in_vma(vma, start, end))
69488d9bfb26SMike Kravetz return false;
69498d9bfb26SMike Kravetz return true;
69508d9bfb26SMike Kravetz }
69518d9bfb26SMike Kravetz
69523212b535SSteve Capper /*
6953017b1660SMike Kravetz * Determine if start,end range within vma could be mapped by shared pmd.
6954017b1660SMike Kravetz * If yes, adjust start and end to cover range associated with possible
6955017b1660SMike Kravetz * shared pmd mappings.
6956017b1660SMike Kravetz */
6957017b1660SMike Kravetz void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6958017b1660SMike Kravetz unsigned long *start, unsigned long *end)
6959017b1660SMike Kravetz {
6960a1ba9da8SLi Xinhai unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
6961a1ba9da8SLi Xinhai v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6962017b1660SMike Kravetz
6963a1ba9da8SLi Xinhai /*
6964f0953a1bSIngo Molnar * vma needs to span at least one aligned PUD size, and the range
6965f0953a1bSIngo Molnar * must be at least partially within it.
6966a1ba9da8SLi Xinhai */
6967a1ba9da8SLi Xinhai if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
6968a1ba9da8SLi Xinhai (*end <= v_start) || (*start >= v_end))
6969017b1660SMike Kravetz return;
6970017b1660SMike Kravetz
697175802ca6SPeter Xu /* Extend the range to be PUD aligned for a worst case scenario */
6972a1ba9da8SLi Xinhai if (*start > v_start)
6973a1ba9da8SLi Xinhai *start = ALIGN_DOWN(*start, PUD_SIZE);
6974017b1660SMike Kravetz
6975a1ba9da8SLi Xinhai if (*end < v_end)
6976a1ba9da8SLi Xinhai *end = ALIGN(*end, PUD_SIZE);
6977017b1660SMike Kravetz }
6978017b1660SMike Kravetz
6979017b1660SMike Kravetz /*
69803212b535SSteve Capper * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
69813212b535SSteve Capper * and returns the corresponding pte. While this is not necessary for the
69823212b535SSteve Capper * !shared pmd case because we can allocate the pmd later as well, it makes the
69833a47c54fSMike Kravetz * code much cleaner. pmd allocation is essential for the shared case because
69843a47c54fSMike Kravetz * pud has to be populated inside the same i_mmap_rwsem section - otherwise
69853a47c54fSMike Kravetz * racing tasks could either miss the sharing (see huge_pte_offset) or select a
69863a47c54fSMike Kravetz * bad pmd for sharing.
69873212b535SSteve Capper */
6988aec44e0fSPeter Xu pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6989aec44e0fSPeter Xu unsigned long addr, pud_t *pud)
69903212b535SSteve Capper {
69913212b535SSteve Capper struct address_space *mapping = vma->vm_file->f_mapping;
69923212b535SSteve Capper pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
69933212b535SSteve Capper vma->vm_pgoff;
69943212b535SSteve Capper struct vm_area_struct *svma;
69953212b535SSteve Capper unsigned long saddr;
69963212b535SSteve Capper pte_t *spte = NULL;
69973212b535SSteve Capper pte_t *pte;
69983212b535SSteve Capper
69993a47c54fSMike Kravetz i_mmap_lock_read(mapping);
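/* Scan other mappings of the same file range for an existing PMD page table to share. */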
70003212b535SSteve Capper vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
70013212b535SSteve Capper if (svma == vma)
70023212b535SSteve Capper continue;
70033212b535SSteve Capper
70043212b535SSteve Capper saddr = page_table_shareable(svma, vma, addr, idx);
70053212b535SSteve Capper if (saddr) {
70069c67a207SPeter Xu spte = hugetlb_walk(svma, saddr,
70077868a208SPunit Agrawal vma_mmu_pagesize(svma));
70083212b535SSteve Capper if (spte) {
70093212b535SSteve Capper get_page(virt_to_page(spte));
70103212b535SSteve Capper break;
70113212b535SSteve Capper }
70123212b535SSteve Capper }
70133212b535SSteve Capper }
70143212b535SSteve Capper
70153212b535SSteve Capper if (!spte)
70163212b535SSteve Capper goto out;
70173212b535SSteve Capper
7018349d1670SPeter Xu spin_lock(&mm->page_table_lock);
7019dc6c9a35SKirill A. Shutemov if (pud_none(*pud)) {
70203212b535SSteve Capper pud_populate(mm, pud,
70213212b535SSteve Capper (pmd_t *)((unsigned long)spte & PAGE_MASK));
7022c17b1f42SKirill A. Shutemov mm_inc_nr_pmds(mm);
7023dc6c9a35SKirill A. Shutemov } else {
70243212b535SSteve Capper put_page(virt_to_page(spte));
7025dc6c9a35SKirill A. Shutemov }
7026349d1670SPeter Xu spin_unlock(&mm->page_table_lock);
70273212b535SSteve Capper out:
70283212b535SSteve Capper pte = (pte_t *)pmd_alloc(mm, pud, addr);
70293a47c54fSMike Kravetz i_mmap_unlock_read(mapping);
70303212b535SSteve Capper return pte;
70313212b535SSteve Capper }
70323212b535SSteve Capper
70333212b535SSteve Capper /*
70343212b535SSteve Capper * unmap huge page backed by shared pte.
70353212b535SSteve Capper *
70363212b535SSteve Capper * The hugetlb pte page is refcounted at the time of mapping. If the pte is
70373212b535SSteve Capper * shared (indicated by page_count > 1), unmap is achieved by clearing the pud
70383212b535SSteve Capper * and decrementing the ref count. If count == 1, the pte page is not shared.
70393212b535SSteve Capper *
70403a47c54fSMike Kravetz * Called with page table lock held.
70413212b535SSteve Capper *
70423212b535SSteve Capper * returns: 1 successfully unmapped a shared pte page
70433212b535SSteve Capper * 0 the underlying pte page is not shared, or it is the last user
70443212b535SSteve Capper */
704534ae204fSMike Kravetz int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
70464ddb4d91SMike Kravetz unsigned long addr, pte_t *ptep)
70473212b535SSteve Capper {
70484ddb4d91SMike Kravetz pgd_t *pgd = pgd_offset(mm, addr);
70494ddb4d91SMike Kravetz p4d_t *p4d = p4d_offset(pgd, addr);
70504ddb4d91SMike Kravetz pud_t *pud = pud_offset(p4d, addr);
70513212b535SSteve Capper
705234ae204fSMike Kravetz i_mmap_assert_write_locked(vma->vm_file->f_mapping);
705340549ba8SMike Kravetz hugetlb_vma_assert_locked(vma);
70543212b535SSteve Capper BUG_ON(page_count(virt_to_page(ptep)) == 0);
70553212b535SSteve Capper if (page_count(virt_to_page(ptep)) == 1)
70563212b535SSteve Capper return 0;
70573212b535SSteve Capper
70583212b535SSteve Capper pud_clear(pud);
70593212b535SSteve Capper put_page(virt_to_page(ptep));
7060dc6c9a35SKirill A. Shutemov mm_dec_nr_pmds(mm);
70613212b535SSteve Capper return 1;
70623212b535SSteve Capper }
7063c1991e07SPeter Xu
70649e5fc74cSSteve Capper #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
70658d9bfb26SMike Kravetz
7066aec44e0fSPeter Xu pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
7067aec44e0fSPeter Xu unsigned long addr, pud_t *pud)
70689e5fc74cSSteve Capper {
70699e5fc74cSSteve Capper return NULL;
70709e5fc74cSSteve Capper }
7071e81f2d22SZhang Zhen
707234ae204fSMike Kravetz int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
70734ddb4d91SMike Kravetz unsigned long addr, pte_t *ptep)
7074e81f2d22SZhang Zhen {
7075e81f2d22SZhang Zhen return 0;
7076e81f2d22SZhang Zhen }
7077017b1660SMike Kravetz
7078017b1660SMike Kravetz void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
7079017b1660SMike Kravetz unsigned long *start, unsigned long *end)
7080017b1660SMike Kravetz {
7081017b1660SMike Kravetz }
7082c1991e07SPeter Xu
7083c1991e07SPeter Xu bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
7084c1991e07SPeter Xu {
7085c1991e07SPeter Xu return false;
7086c1991e07SPeter Xu }
70873212b535SSteve Capper #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
70883212b535SSteve Capper
70899e5fc74cSSteve Capper #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
7090aec44e0fSPeter Xu pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
70919e5fc74cSSteve Capper unsigned long addr, unsigned long sz)
70929e5fc74cSSteve Capper {
70939e5fc74cSSteve Capper pgd_t *pgd;
7094c2febafcSKirill A. Shutemov p4d_t *p4d;
70959e5fc74cSSteve Capper pud_t *pud;
70969e5fc74cSSteve Capper pte_t *pte = NULL;
70979e5fc74cSSteve Capper
70989e5fc74cSSteve Capper pgd = pgd_offset(mm, addr);
7099f4f0a3d8SKirill A. Shutemov p4d = p4d_alloc(mm, pgd, addr);
7100f4f0a3d8SKirill A. Shutemov if (!p4d)
7101f4f0a3d8SKirill A. Shutemov return NULL;
7102c2febafcSKirill A. Shutemov pud = pud_alloc(mm, p4d, addr);
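/*
 * PUD-sized pages are mapped directly at the PUD level; PMD-sized pages
 * may share a PMD page table with other mappings of the same file.
 */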
71039e5fc74cSSteve Capper if (pud) {
71049e5fc74cSSteve Capper if (sz == PUD_SIZE) {
71059e5fc74cSSteve Capper pte = (pte_t *)pud;
71069e5fc74cSSteve Capper } else {
71079e5fc74cSSteve Capper BUG_ON(sz != PMD_SIZE);
7108c1991e07SPeter Xu if (want_pmd_share(vma, addr) && pud_none(*pud))
7109aec44e0fSPeter Xu pte = huge_pmd_share(mm, vma, addr, pud);
71109e5fc74cSSteve Capper else
71119e5fc74cSSteve Capper pte = (pte_t *)pmd_alloc(mm, pud, addr);
71129e5fc74cSSteve Capper }
71139e5fc74cSSteve Capper }
7114191fcdb6SJohn Hubbard
7115191fcdb6SJohn Hubbard if (pte) {
7116191fcdb6SJohn Hubbard pte_t pteval = ptep_get_lockless(pte);
7117191fcdb6SJohn Hubbard
7118191fcdb6SJohn Hubbard BUG_ON(pte_present(pteval) && !pte_huge(pteval));
7119191fcdb6SJohn Hubbard }
71209e5fc74cSSteve Capper
71219e5fc74cSSteve Capper return pte;
71229e5fc74cSSteve Capper }
71239e5fc74cSSteve Capper
71249b19df29SPunit Agrawal /*
71259b19df29SPunit Agrawal * huge_pte_offset() - Walk the page table to resolve the hugepage
71269b19df29SPunit Agrawal * entry at address @addr
71279b19df29SPunit Agrawal *
71288ac0b81aSLi Xinhai * Return: Pointer to page table entry (PUD or PMD) for
71298ac0b81aSLi Xinhai * address @addr, or NULL if a !p*d_present() entry is encountered and the
71309b19df29SPunit Agrawal * size @sz doesn't match the hugepage size at this level of the page
71319b19df29SPunit Agrawal * table.
71329b19df29SPunit Agrawal */
71337868a208SPunit Agrawal pte_t *huge_pte_offset(struct mm_struct *mm,
71347868a208SPunit Agrawal unsigned long addr, unsigned long sz)
71359e5fc74cSSteve Capper {
71369e5fc74cSSteve Capper pgd_t *pgd;
7137c2febafcSKirill A. Shutemov p4d_t *p4d;
71388ac0b81aSLi Xinhai pud_t *pud;
71398ac0b81aSLi Xinhai pmd_t *pmd;
71409e5fc74cSSteve Capper
71419e5fc74cSSteve Capper pgd = pgd_offset(mm, addr);
7142c2febafcSKirill A. Shutemov if (!pgd_present(*pgd))
7143c2febafcSKirill A. Shutemov return NULL;
7144c2febafcSKirill A. Shutemov p4d = p4d_offset(pgd, addr);
7145c2febafcSKirill A. Shutemov if (!p4d_present(*p4d))
7146c2febafcSKirill A. Shutemov return NULL;
71479b19df29SPunit Agrawal
7148c2febafcSKirill A. Shutemov pud = pud_offset(p4d, addr);
71498ac0b81aSLi Xinhai if (sz == PUD_SIZE)
71508ac0b81aSLi Xinhai /* must be pud huge, non-present or none */
71519e5fc74cSSteve Capper return (pte_t *)pud;
71528ac0b81aSLi Xinhai if (!pud_present(*pud))
71538ac0b81aSLi Xinhai return NULL;
71548ac0b81aSLi Xinhai /* must have a valid entry and size to go further */
71559b19df29SPunit Agrawal
71569e5fc74cSSteve Capper pmd = pmd_offset(pud, addr);
71578ac0b81aSLi Xinhai /* must be pmd huge, non-present or none */
71589e5fc74cSSteve Capper return (pte_t *)pmd;
71599e5fc74cSSteve Capper }
71609e5fc74cSSteve Capper
7161e95a9851SMike Kravetz /*
7162e95a9851SMike Kravetz * Return a mask that can be used to advance an address to the last huge
7163e95a9851SMike Kravetz * page mapped by a single page table page. Used to skip non-present
7164e95a9851SMike Kravetz * page table entries when linearly scanning address ranges. Architectures
7165e95a9851SMike Kravetz * with unique huge page to page table relationships can define their own
7166e95a9851SMike Kravetz * version of this routine.
7167e95a9851SMike Kravetz */
7168e95a9851SMike Kravetz unsigned long hugetlb_mask_last_page(struct hstate *h)
7169e95a9851SMike Kravetz {
7170e95a9851SMike Kravetz unsigned long hp_size = huge_page_size(h);
7171e95a9851SMike Kravetz
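/*
 * For example, for PMD-sized huge pages, ORing an address with
 * (PUD_SIZE - PMD_SIZE) moves it to the last PMD entry within its PUD,
 * so the caller's next step advances to the following PUD.
 */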
7172e95a9851SMike Kravetz if (hp_size == PUD_SIZE)
7173e95a9851SMike Kravetz return P4D_SIZE - PUD_SIZE;
7174e95a9851SMike Kravetz else if (hp_size == PMD_SIZE)
7175e95a9851SMike Kravetz return PUD_SIZE - PMD_SIZE;
7176e95a9851SMike Kravetz else
7177e95a9851SMike Kravetz return 0UL;
7178e95a9851SMike Kravetz }
7179e95a9851SMike Kravetz
7180e95a9851SMike Kravetz #else
7181e95a9851SMike Kravetz
7182e95a9851SMike Kravetz /* See description above. Architectures can provide their own version. */
7183e95a9851SMike Kravetz __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
7184e95a9851SMike Kravetz {
71854ddb4d91SMike Kravetz #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
71864ddb4d91SMike Kravetz if (huge_page_size(h) == PMD_SIZE)
71874ddb4d91SMike Kravetz return PUD_SIZE - PMD_SIZE;
71884ddb4d91SMike Kravetz #endif
7189e95a9851SMike Kravetz return 0UL;
7190e95a9851SMike Kravetz }
7191e95a9851SMike Kravetz
719261f77edaSNaoya Horiguchi #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
719361f77edaSNaoya Horiguchi
719461f77edaSNaoya Horiguchi /*
719561f77edaSNaoya Horiguchi * These functions can be overridden if your architecture needs its own
719661f77edaSNaoya Horiguchi * behavior.
719761f77edaSNaoya Horiguchi */
71989747b9e9SBaolin Wang bool isolate_hugetlb(struct folio *folio, struct list_head *list)
719931caf665SNaoya Horiguchi {
72009747b9e9SBaolin Wang bool ret = true;
7201bcc54222SNaoya Horiguchi
7202db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
72036aa3a920SSidhartha Kumar if (!folio_test_hugetlb(folio) ||
72046aa3a920SSidhartha Kumar !folio_test_hugetlb_migratable(folio) ||
72056aa3a920SSidhartha Kumar !folio_try_get(folio)) {
72069747b9e9SBaolin Wang ret = false;
7207bcc54222SNaoya Horiguchi goto unlock;
7208bcc54222SNaoya Horiguchi }
72096aa3a920SSidhartha Kumar folio_clear_hugetlb_migratable(folio);
72106aa3a920SSidhartha Kumar list_move_tail(&folio->lru, list);
7211bcc54222SNaoya Horiguchi unlock:
7212db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
7213bcc54222SNaoya Horiguchi return ret;
721431caf665SNaoya Horiguchi }
721531caf665SNaoya Horiguchi
721604bac040SSidhartha Kumar int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
721725182f05SNaoya Horiguchi {
721825182f05SNaoya Horiguchi int ret = 0;
721925182f05SNaoya Horiguchi
722025182f05SNaoya Horiguchi *hugetlb = false;
722125182f05SNaoya Horiguchi spin_lock_irq(&hugetlb_lock);
722204bac040SSidhartha Kumar if (folio_test_hugetlb(folio)) {
722325182f05SNaoya Horiguchi *hugetlb = true;
722404bac040SSidhartha Kumar if (folio_test_hugetlb_freed(folio))
7225b283d983SNaoya Horiguchi ret = 0;
722604bac040SSidhartha Kumar else if (folio_test_hugetlb_migratable(folio) || unpoison)
722704bac040SSidhartha Kumar ret = folio_try_get(folio);
72280ed950d1SNaoya Horiguchi else
72290ed950d1SNaoya Horiguchi ret = -EBUSY;
723025182f05SNaoya Horiguchi }
723125182f05SNaoya Horiguchi spin_unlock_irq(&hugetlb_lock);
723225182f05SNaoya Horiguchi return ret;
723325182f05SNaoya Horiguchi }
723425182f05SNaoya Horiguchi
7235e591ef7dSNaoya Horiguchi int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
7236e591ef7dSNaoya Horiguchi bool *migratable_cleared)
7237405ce051SNaoya Horiguchi {
7238405ce051SNaoya Horiguchi int ret;
7239405ce051SNaoya Horiguchi
7240405ce051SNaoya Horiguchi spin_lock_irq(&hugetlb_lock);
7241e591ef7dSNaoya Horiguchi ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
7242405ce051SNaoya Horiguchi spin_unlock_irq(&hugetlb_lock);
7243405ce051SNaoya Horiguchi return ret;
7244405ce051SNaoya Horiguchi }
7245405ce051SNaoya Horiguchi
7246ea8e72f4SSidhartha Kumar void folio_putback_active_hugetlb(struct folio *folio)
724731caf665SNaoya Horiguchi {
7248db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
7249ea8e72f4SSidhartha Kumar folio_set_hugetlb_migratable(folio);
7250ea8e72f4SSidhartha Kumar list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
7251db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
7252ea8e72f4SSidhartha Kumar folio_put(folio);
725331caf665SNaoya Horiguchi }
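/*
 * A minimal sketch of the isolate/putback pairing above (illustrative
 * only; the function below is hypothetical). A caller isolates hugetlb
 * folios onto a private list and, if migration does not happen, returns
 * them to the active list, dropping the reference taken by
 * isolate_hugetlb().
 */
static void __maybe_unused hugetlb_isolate_putback_example(struct folio *folio)
{
	LIST_HEAD(pagelist);

	if (!isolate_hugetlb(folio, &pagelist))
		return;	/* not hugetlb, not migratable, or ref not taken */

	/* ... try to migrate the folios collected on pagelist ... */

	/* Migration skipped or failed: put the folio back. */
	folio_putback_active_hugetlb(folio);
}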
7254ab5ac90aSMichal Hocko
7255345c62d1SSidhartha Kumar void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
7256ab5ac90aSMichal Hocko {
7257345c62d1SSidhartha Kumar struct hstate *h = folio_hstate(old_folio);
7258ab5ac90aSMichal Hocko
7259345c62d1SSidhartha Kumar hugetlb_cgroup_migrate(old_folio, new_folio);
7260345c62d1SSidhartha Kumar set_page_owner_migrate_reason(&new_folio->page, reason);
7261ab5ac90aSMichal Hocko
7262ab5ac90aSMichal Hocko /*
7263345c62d1SSidhartha Kumar * Transfer the temporary state of the new hugetlb folio. This is
7264ab5ac90aSMichal Hocko * the reverse of other transitions because the new folio is going to
7265ab5ac90aSMichal Hocko * be the final one while the old one will be freed, so the old folio
7266ab5ac90aSMichal Hocko * takes over the temporary status.
7267ab5ac90aSMichal Hocko *
7268ab5ac90aSMichal Hocko * Also note that we have to transfer the per-node surplus state
7269ab5ac90aSMichal Hocko * here as well otherwise the global surplus count will not match
7270ab5ac90aSMichal Hocko * the per-node's.
7271ab5ac90aSMichal Hocko */
7272345c62d1SSidhartha Kumar if (folio_test_hugetlb_temporary(new_folio)) {
7273345c62d1SSidhartha Kumar int old_nid = folio_nid(old_folio);
7274345c62d1SSidhartha Kumar int new_nid = folio_nid(new_folio);
7275ab5ac90aSMichal Hocko
7276345c62d1SSidhartha Kumar folio_set_hugetlb_temporary(old_folio);
7277345c62d1SSidhartha Kumar folio_clear_hugetlb_temporary(new_folio);
7278345c62d1SSidhartha Kumar
7279ab5ac90aSMichal Hocko
72805af1ab1dSMiaohe Lin /*
72815af1ab1dSMiaohe Lin * There is no need to transfer the per-node surplus state
72825af1ab1dSMiaohe Lin * when we do not cross the node.
72835af1ab1dSMiaohe Lin */
72845af1ab1dSMiaohe Lin if (new_nid == old_nid)
72855af1ab1dSMiaohe Lin return;
7286db71ef79SMike Kravetz spin_lock_irq(&hugetlb_lock);
7287ab5ac90aSMichal Hocko if (h->surplus_huge_pages_node[old_nid]) {
7288ab5ac90aSMichal Hocko h->surplus_huge_pages_node[old_nid]--;
7289ab5ac90aSMichal Hocko h->surplus_huge_pages_node[new_nid]++;
7290ab5ac90aSMichal Hocko }
7291db71ef79SMike Kravetz spin_unlock_irq(&hugetlb_lock);
7292ab5ac90aSMichal Hocko }
7293ab5ac90aSMichal Hocko }
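/*
 * Worked example of the surplus transfer above: if a hugetlb folio that is
 * accounted as surplus on node 0 is migrated to a temporary folio on node 1,
 * one unit of surplus accounting follows it from node 0 to node 1, so the
 * global surplus count still equals the sum of the per-node counts once the
 * old folio is freed.
 */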
7294cf11e85fSRoman Gushchin
7295b30c14cdSJames Houghton static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
7296b30c14cdSJames Houghton unsigned long start,
7297b30c14cdSJames Houghton unsigned long end)
72986dfeaff9SPeter Xu {
72996dfeaff9SPeter Xu struct hstate *h = hstate_vma(vma);
73006dfeaff9SPeter Xu unsigned long sz = huge_page_size(h);
73016dfeaff9SPeter Xu struct mm_struct *mm = vma->vm_mm;
73026dfeaff9SPeter Xu struct mmu_notifier_range range;
7303b30c14cdSJames Houghton unsigned long address;
73046dfeaff9SPeter Xu spinlock_t *ptl;
73056dfeaff9SPeter Xu pte_t *ptep;
73066dfeaff9SPeter Xu
73076dfeaff9SPeter Xu if (!(vma->vm_flags & VM_MAYSHARE))
73086dfeaff9SPeter Xu return;
73096dfeaff9SPeter Xu
73106dfeaff9SPeter Xu if (start >= end)
73116dfeaff9SPeter Xu return;
73126dfeaff9SPeter Xu
73139c8bbfacSBaolin Wang flush_cache_range(vma, start, end);
73146dfeaff9SPeter Xu /*
73156dfeaff9SPeter Xu * No need to call adjust_range_if_pmd_sharing_possible(), because
73166dfeaff9SPeter Xu * we have already done the PUD_SIZE alignment.
73176dfeaff9SPeter Xu */
73187d4a8be0SAlistair Popple mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
73196dfeaff9SPeter Xu start, end);
73206dfeaff9SPeter Xu mmu_notifier_invalidate_range_start(&range);
732140549ba8SMike Kravetz hugetlb_vma_lock_write(vma);
73226dfeaff9SPeter Xu i_mmap_lock_write(vma->vm_file->f_mapping);
73236dfeaff9SPeter Xu for (address = start; address < end; address += PUD_SIZE) {
73249c67a207SPeter Xu ptep = hugetlb_walk(vma, address, sz);
73256dfeaff9SPeter Xu if (!ptep)
73266dfeaff9SPeter Xu continue;
73276dfeaff9SPeter Xu ptl = huge_pte_lock(h, mm, ptep);
73284ddb4d91SMike Kravetz huge_pmd_unshare(mm, vma, address, ptep);
73296dfeaff9SPeter Xu spin_unlock(ptl);
73306dfeaff9SPeter Xu }
73316dfeaff9SPeter Xu flush_hugetlb_tlb_range(vma, start, end);
73326dfeaff9SPeter Xu i_mmap_unlock_write(vma->vm_file->f_mapping);
733340549ba8SMike Kravetz hugetlb_vma_unlock_write(vma);
73346dfeaff9SPeter Xu /*
73351af5a810SAlistair Popple * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
7336ee65728eSMike Rapoport * Documentation/mm/mmu_notifier.rst.
73376dfeaff9SPeter Xu */
73386dfeaff9SPeter Xu mmu_notifier_invalidate_range_end(&range);
73396dfeaff9SPeter Xu }
73406dfeaff9SPeter Xu
7341b30c14cdSJames Houghton /*
7342b30c14cdSJames Houghton * This function will unconditionally remove all the shared pmd pgtable entries
7343b30c14cdSJames Houghton * within the specific vma for a hugetlbfs memory range.
7344b30c14cdSJames Houghton */
7345b30c14cdSJames Houghton void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7346b30c14cdSJames Houghton {
7347b30c14cdSJames Houghton hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
7348b30c14cdSJames Houghton ALIGN_DOWN(vma->vm_end, PUD_SIZE));
7349b30c14cdSJames Houghton }
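/*
 * Worked example of the alignment in hugetlb_unshare_all_pmds() above,
 * assuming a 1 GiB PUD_SIZE: for a VMA spanning [0x40200000, 0xc0000000),
 * ALIGN() rounds the start up to 0x80000000 and ALIGN_DOWN() leaves the
 * end at 0xc0000000, so only PUD ranges fully covered by the VMA are
 * unshared; a partially covered PUD range cannot have been shared in the
 * first place.
 */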
7350b30c14cdSJames Houghton
7351cf11e85fSRoman Gushchin #ifdef CONFIG_CMA
7352cf11e85fSRoman Gushchin static bool cma_reserve_called __initdata;
7353cf11e85fSRoman Gushchin
7354cf11e85fSRoman Gushchin static int __init cmdline_parse_hugetlb_cma(char *p)
7355cf11e85fSRoman Gushchin {
735638e719abSBaolin Wang int nid, count = 0;
735738e719abSBaolin Wang unsigned long tmp;
735838e719abSBaolin Wang char *s = p;
735938e719abSBaolin Wang
736038e719abSBaolin Wang while (*s) {
736138e719abSBaolin Wang if (sscanf(s, "%lu%n", &tmp, &count) != 1)
736238e719abSBaolin Wang break;
736338e719abSBaolin Wang
736438e719abSBaolin Wang if (s[count] == ':') {
7365f9317f77SMike Kravetz if (tmp >= MAX_NUMNODES)
736638e719abSBaolin Wang break;
7367f9317f77SMike Kravetz nid = array_index_nospec(tmp, MAX_NUMNODES);
736838e719abSBaolin Wang
736938e719abSBaolin Wang s += count + 1;
737038e719abSBaolin Wang tmp = memparse(s, &s);
737138e719abSBaolin Wang hugetlb_cma_size_in_node[nid] = tmp;
737238e719abSBaolin Wang hugetlb_cma_size += tmp;
737338e719abSBaolin Wang
737438e719abSBaolin Wang /*
737538e719abSBaolin Wang * Skip the separator if we have one; otherwise
737638e719abSBaolin Wang * stop parsing.
737738e719abSBaolin Wang */
737838e719abSBaolin Wang if (*s == ',')
737938e719abSBaolin Wang s++;
738038e719abSBaolin Wang else
738138e719abSBaolin Wang break;
738238e719abSBaolin Wang } else {
7383cf11e85fSRoman Gushchin hugetlb_cma_size = memparse(p, &p);
738438e719abSBaolin Wang break;
738538e719abSBaolin Wang }
738638e719abSBaolin Wang }
738738e719abSBaolin Wang
7388cf11e85fSRoman Gushchin return 0;
7389cf11e85fSRoman Gushchin }
7390cf11e85fSRoman Gushchin
7391cf11e85fSRoman Gushchin early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
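/*
 * Example command lines accepted by the parser above (the syntax is the
 * one documented for "hugetlb_cma=" in kernel-parameters.txt):
 *
 *   hugetlb_cma=4G          reserve 4 GiB, spread across online nodes
 *   hugetlb_cma=0:2G,2:2G   reserve 2 GiB on node 0 and 2 GiB on node 2
 *
 * Each "<nid>:<size>" pair fills hugetlb_cma_size_in_node[nid] and is
 * summed into hugetlb_cma_size; the plain form sets hugetlb_cma_size only.
 */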
7392cf11e85fSRoman Gushchin
7393cf11e85fSRoman Gushchin void __init hugetlb_cma_reserve(int order)
7394cf11e85fSRoman Gushchin {
7395cf11e85fSRoman Gushchin unsigned long size, reserved, per_node;
739638e719abSBaolin Wang bool node_specific_cma_alloc = false;
7397cf11e85fSRoman Gushchin int nid;
7398cf11e85fSRoman Gushchin
7399cf11e85fSRoman Gushchin cma_reserve_called = true;
7400cf11e85fSRoman Gushchin
7401cf11e85fSRoman Gushchin if (!hugetlb_cma_size)
7402cf11e85fSRoman Gushchin return;
7403cf11e85fSRoman Gushchin
740438e719abSBaolin Wang for (nid = 0; nid < MAX_NUMNODES; nid++) {
740538e719abSBaolin Wang if (hugetlb_cma_size_in_node[nid] == 0)
740638e719abSBaolin Wang continue;
740738e719abSBaolin Wang
740830a51400SPeng Liu if (!node_online(nid)) {
740938e719abSBaolin Wang pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
741038e719abSBaolin Wang hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
741138e719abSBaolin Wang hugetlb_cma_size_in_node[nid] = 0;
741238e719abSBaolin Wang continue;
741338e719abSBaolin Wang }
741438e719abSBaolin Wang
741538e719abSBaolin Wang if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
741638e719abSBaolin Wang pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
741738e719abSBaolin Wang nid, (PAGE_SIZE << order) / SZ_1M);
741838e719abSBaolin Wang hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
741938e719abSBaolin Wang hugetlb_cma_size_in_node[nid] = 0;
742038e719abSBaolin Wang } else {
742138e719abSBaolin Wang node_specific_cma_alloc = true;
742238e719abSBaolin Wang }
742338e719abSBaolin Wang }
742438e719abSBaolin Wang
742538e719abSBaolin Wang /* Validate the CMA size again in case some invalid nodes were specified. */
742638e719abSBaolin Wang if (!hugetlb_cma_size)
742738e719abSBaolin Wang return;
742838e719abSBaolin Wang
7429cf11e85fSRoman Gushchin if (hugetlb_cma_size < (PAGE_SIZE << order)) {
7430cf11e85fSRoman Gushchin pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
7431cf11e85fSRoman Gushchin (PAGE_SIZE << order) / SZ_1M);
7432a01f4390SMike Kravetz hugetlb_cma_size = 0;
7433cf11e85fSRoman Gushchin return;
7434cf11e85fSRoman Gushchin }
7435cf11e85fSRoman Gushchin
743638e719abSBaolin Wang if (!node_specific_cma_alloc) {
7437cf11e85fSRoman Gushchin /*
7438cf11e85fSRoman Gushchin * If a 3 GB area is requested on a machine with 4 NUMA nodes,
7439cf11e85fSRoman Gushchin * allocate 1 GB on each of the first three nodes and ignore the last one.
7440cf11e85fSRoman Gushchin */
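		/*
		 * Worked example (assuming a 1 GiB gigantic page order):
		 * for hugetlb_cma_size = 3 GiB and 4 online nodes,
		 * per_node = DIV_ROUND_UP(3 GiB, 4) = 768 MiB, which the
		 * loop below rounds up to 1 GiB, so nodes 0-2 each get
		 * 1 GiB and the loop stops before reaching node 3.
		 */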
7441cf11e85fSRoman Gushchin per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
7442cf11e85fSRoman Gushchin pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
7443cf11e85fSRoman Gushchin hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
744438e719abSBaolin Wang }
7445cf11e85fSRoman Gushchin
7446cf11e85fSRoman Gushchin reserved = 0;
744730a51400SPeng Liu for_each_online_node(nid) {
7448cf11e85fSRoman Gushchin int res;
74492281f797SBarry Song char name[CMA_MAX_NAME];
7450cf11e85fSRoman Gushchin
745138e719abSBaolin Wang if (node_specific_cma_alloc) {
745238e719abSBaolin Wang if (hugetlb_cma_size_in_node[nid] == 0)
745338e719abSBaolin Wang continue;
745438e719abSBaolin Wang
745538e719abSBaolin Wang size = hugetlb_cma_size_in_node[nid];
745638e719abSBaolin Wang } else {
7457cf11e85fSRoman Gushchin size = min(per_node, hugetlb_cma_size - reserved);
745838e719abSBaolin Wang }
745938e719abSBaolin Wang
7460cf11e85fSRoman Gushchin size = round_up(size, PAGE_SIZE << order);
7461cf11e85fSRoman Gushchin
74622281f797SBarry Song snprintf(name, sizeof(name), "hugetlb%d", nid);
7463a01f4390SMike Kravetz /*
7464a01f4390SMike Kravetz * Note that 'order per bit' is based on the smallest size that
7465a01f4390SMike Kravetz * may be returned to the CMA allocator in the case of
7466a01f4390SMike Kravetz * huge page demotion.
7467a01f4390SMike Kravetz */
7468a01f4390SMike Kravetz res = cma_declare_contiguous_nid(0, size, 0,
7469a01f4390SMike Kravetz PAGE_SIZE << HUGETLB_PAGE_ORDER,
747029d0f41dSBarry Song 0, false, name,
7471cf11e85fSRoman Gushchin &hugetlb_cma[nid], nid);
7472cf11e85fSRoman Gushchin if (res) {
7473cf11e85fSRoman Gushchin pr_warn("hugetlb_cma: reservation failed: err %d, node %d\n",
7474cf11e85fSRoman Gushchin res, nid);
7475cf11e85fSRoman Gushchin continue;
7476cf11e85fSRoman Gushchin }
7477cf11e85fSRoman Gushchin
7478cf11e85fSRoman Gushchin reserved += size;
7479cf11e85fSRoman Gushchin pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
7480cf11e85fSRoman Gushchin size / SZ_1M, nid);
7481cf11e85fSRoman Gushchin
7482cf11e85fSRoman Gushchin if (reserved >= hugetlb_cma_size)
7483cf11e85fSRoman Gushchin break;
7484cf11e85fSRoman Gushchin }
7485a01f4390SMike Kravetz
7486a01f4390SMike Kravetz if (!reserved)
7487a01f4390SMike Kravetz /*
7488a01f4390SMike Kravetz * hugetlb_cma_size is used to determine if allocations from
7489a01f4390SMike Kravetz * cma are possible. Set to zero if no cma regions are set up.
7490a01f4390SMike Kravetz */
7491a01f4390SMike Kravetz hugetlb_cma_size = 0;
7492cf11e85fSRoman Gushchin }
7493cf11e85fSRoman Gushchin
7494263b8998SMiaohe Lin static void __init hugetlb_cma_check(void)
7495cf11e85fSRoman Gushchin {
7496cf11e85fSRoman Gushchin if (!hugetlb_cma_size || cma_reserve_called)
7497cf11e85fSRoman Gushchin return;
7498cf11e85fSRoman Gushchin
7499cf11e85fSRoman Gushchin pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
7500cf11e85fSRoman Gushchin }
7501cf11e85fSRoman Gushchin
7502cf11e85fSRoman Gushchin #endif /* CONFIG_CMA */
7503