xref: /openbmc/linux/include/linux/hugetlb.h (revision 512b420a)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;
struct node;

#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

void free_huge_folio(struct folio *folio);

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For a HugeTLB page, there is more metadata to save than the head struct
 * page can hold, so we also use some of the tail struct pages to store it.
 */
#define __NR_USED_SUBPAGE 3

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
	struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
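
/*
 * Illustrative sketch (not part of the kernel API): the [from, to)
 * convention documented above means the number of huge pages covered by a
 * region is simply to - from.  The helper name below is hypothetical and
 * only demonstrates the arithmetic.
 */
#if 0	/* example only */
static inline long file_region_pages(const struct file_region *rg)
{
	/* to is exclusive: a region with from == 0, to == 4 spans 4 pages. */
	return rg->to - rg->from;
}
#endif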

struct hugetlb_vma_lock {
	struct kref refs;
	struct rw_semaphore rw_sema;
	struct vm_area_struct *vma;
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
				      unsigned long address, unsigned int flags,
				      unsigned int *page_mask);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *,
			  zap_flags_t);
void __unmap_hugepage_range(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     uffd_flags_t flags,
			     struct folio **foliop);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_hugetlb(struct folio *folio, struct list_head *list);
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				bool *migratable_cleared);
void folio_putback_active_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

#ifndef CONFIG_HIGHPTE
/*
 * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
 * which may go down to the lowest PTE level in their huge_pte_offset() and
 * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
 */
static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}
static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
				    unsigned long address)
{
	return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
}
#endif

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
/*
 * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
 * Returns the pte_t* if found, or NULL if the address is not mapped.
 *
 * IMPORTANT: we should normally not call this function directly; it is
 * only a common interface for implementing arch-specific walkers.  Please
 * use hugetlb_walk() instead, because that will attempt to verify the
 * locking for you.
 *
 * Since this function will walk all the pgtable pages (including not only
 * high-level pgtable pages, but also PUD entries that can be unshared
 * concurrently for VM_SHARED), the caller of this function is responsible
 * for its thread safety.  One can follow this rule:
 *
 *  (1) For private mappings: pmd unsharing is not possible, so holding the
 *      mmap_lock for either read or write is sufficient. Most callers
 *      already hold the mmap_lock, so normally, no special action is
 *      required.
 *
 *  (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
 *      pgtable page can go away from under us!  It can be done by a pmd
 *      unshare with a follow up munmap() on the other process), then we
 *      need either:
 *
 *     (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
 *           won't happen upon the range (it also makes sure the pte_t we
 *           read is the right and stable one), or,
 *
 *     (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
 *           sure even if unshare happened the racy unmap() will wait until
 *           i_mmap_rwsem is released.
 *
 * Option (2.1) is the safest: it guarantees pte stability, from the pmd
 * sharing point of view, until the vma lock is released.  Option (2.2)
 * doesn't protect against a concurrent pmd unshare, but it makes sure the
 * pgtable page is safe to access.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
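
/*
 * Illustrative sketch (example only, not kernel code): one way a caller can
 * honour rule (2.1) above for a shared mapping is to take the hugetlb vma
 * lock (declared below) around the walk.  This assumes the hugetlb_walk()
 * helper defined near the end of this header, which wraps huge_pte_offset()
 * with locking checks; the function name here is hypothetical.
 */
#if 0	/* example only */
static pte_t *example_walk_shared(struct vm_area_struct *vma,
				  unsigned long addr, unsigned long sz)
{
	pte_t *ptep;

	hugetlb_vma_lock_read(vma);	/* rule (2.1): blocks pmd unsharing */
	ptep = hugetlb_walk(vma, addr, sz);
	/* ... read *ptep while the vma lock is still held ... */
	hugetlb_vma_unlock_read(vma);
	return ptep;
}
#endif
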
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);

extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
				unsigned long *begin, unsigned long *end);
extern void __hugetlb_zap_end(struct vm_area_struct *vma,
			      struct zap_details *details);

static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
				     unsigned long *start, unsigned long *end)
{
	if (is_vm_hugetlb_page(vma))
		__hugetlb_zap_begin(vma, start, end);
}

static inline void hugetlb_zap_end(struct vm_area_struct *vma,
				   struct zap_details *details)
{
	if (is_vm_hugetlb_page(vma))
		__hugetlb_zap_end(vma, details);
}

void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_begin(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_end(
				struct vm_area_struct *vma,
				struct zap_details *details)
{
}

static inline struct page *hugetlb_follow_page_mask(
    struct vm_area_struct *vma, unsigned long address, unsigned int flags,
    unsigned int *page_mask)
{
	BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE */
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
					  struct mm_struct *src,
					  struct vm_area_struct *dst_vma,
					  struct vm_area_struct *src_vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}

static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	return 1;
}

static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
					   struct vm_area_struct *dst_vma,
					   unsigned long dst_addr,
					   unsigned long src_addr,
					   uffd_flags_t flags,
					   struct folio **foliop)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
	return false;
}

static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
	return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					bool *migratable_cleared)
{
	return 0;
}

static inline void folio_putback_active_hugetlb(struct folio *folio)
{
}

static inline void move_hugetlb_state(struct folio *old_folio,
					struct folio *new_folio, int reason)
{
}

static inline long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
{
	return 0;
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page,
			zap_flags_t zap_flags)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory.  If an arch supports hugepages
 * at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time.  Cleared when page is fully instantiated.  Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization:  Examined or modified by code that knows it has
 *	the only reference to page.  i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable  - Set after a newly allocated page is added to the page
 *	cache and/or page tables.  Indicates the page is a candidate for
 *	migration.
 *	Synchronization:  Initially set after new page allocation with no
 *	locking.  When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator.  Typically used for migration target pages when no pages
 *	are available in the pool.  The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference.  All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *	that is not tracked by the raw_hwp_page list.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	HPG_raw_hwp_unreliable,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static __always_inline						\
bool folio_test_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		return test_bit(HPG_##flname, private);		\
	}							\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_set_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		set_bit(HPG_##flname, private);			\
	}							\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_clear_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		clear_bit(HPG_##flname, private);		\
	}							\
static inline void ClearHPage##uname(struct page *page)		\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline bool						\
folio_test_hugetlb_##flname(struct folio *folio)		\
	{ return 0; }						\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void						\
folio_set_hugetlb_##flname(struct folio *folio)			\
	{ }							\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void						\
folio_clear_hugetlb_##flname(struct folio *folio)		\
	{ }							\
static inline void ClearHPage##uname(struct page *page)		\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
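
/*
 * Illustrative sketch (not part of the kernel API): the HPAGEFLAG()
 * invocations above generate folio_test_hugetlb_*(), folio_set_hugetlb_*()
 * and folio_clear_hugetlb_*() accessors, e.g. folio_test_hugetlb_temporary()
 * from HPAGEFLAG(Temporary, temporary).  The helper below is hypothetical
 * and only demonstrates the naming scheme.
 */
#if 0	/* example only */
static inline bool example_folio_is_temporary(struct folio *folio)
{
	return folio_test_hugetlb_temporary(folio);
}
#endif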

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[8];
	struct cftype cgroup_files_legacy[10];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct folio *folio);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
	return folio->_hugetlb_subpool;
}

static inline void hugetlb_set_folio_subpool(struct folio *folio,
					struct hugepage_subpool *subpool)
{
	folio->_hugetlb_subpool = subpool;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	if (page_size_log < BITS_PER_LONG)
		return size_to_hstate(1UL << page_size_log);

	return NULL;
}
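
/*
 * Worked example (illustrative): page_size_log is the base-2 log of the
 * requested huge page size, e.g. as carried by the MAP_HUGE_* and
 * SHM_HUGE_* flag encodings.  A value of 21 selects the hstate for
 * 1UL << 21 = 2 MiB pages, while 0 means "use the default hstate".
 */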

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(const struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) > MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
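
/*
 * Worked example (illustrative): with 4 KiB base pages and a 2 MiB hstate
 * (h->order == 9), huge_page_size() is 4 KiB << 9 = 2 MiB,
 * pages_per_huge_page() is 1 << 9 = 512 base pages, and
 * blocks_per_huge_page() is 2 MiB / 512 = 4096 sectors of 512 bytes.
 */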
846a5516438SAndi Kleen 
847a5516438SAndi Kleen #include <asm/hugetlb.h>
848a5516438SAndi Kleen 
849b0eae98cSAnshuman Khandual #ifndef is_hugepage_only_range
is_hugepage_only_range(struct mm_struct * mm,unsigned long addr,unsigned long len)850b0eae98cSAnshuman Khandual static inline int is_hugepage_only_range(struct mm_struct *mm,
851b0eae98cSAnshuman Khandual 					unsigned long addr, unsigned long len)
852b0eae98cSAnshuman Khandual {
853b0eae98cSAnshuman Khandual 	return 0;
854b0eae98cSAnshuman Khandual }
855b0eae98cSAnshuman Khandual #define is_hugepage_only_range is_hugepage_only_range
856b0eae98cSAnshuman Khandual #endif
857b0eae98cSAnshuman Khandual 
8585be99343SAnshuman Khandual #ifndef arch_clear_hugepage_flags
arch_clear_hugepage_flags(struct page * page)8595be99343SAnshuman Khandual static inline void arch_clear_hugepage_flags(struct page *page) { }
8605be99343SAnshuman Khandual #define arch_clear_hugepage_flags arch_clear_hugepage_flags
8615be99343SAnshuman Khandual #endif
8625be99343SAnshuman Khandual 
863d9ed9faaSChris Metcalf #ifndef arch_make_huge_pte
arch_make_huge_pte(pte_t entry,unsigned int shift,vm_flags_t flags)86479c1c594SChristophe Leroy static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
86579c1c594SChristophe Leroy 				       vm_flags_t flags)
866d9ed9faaSChris Metcalf {
86716785bd7SAnshuman Khandual 	return pte_mkhuge(entry);
868d9ed9faaSChris Metcalf }
869d9ed9faaSChris Metcalf #endif
870d9ed9faaSChris Metcalf 
folio_hstate(struct folio * folio)871e51da3a9SSidhartha Kumar static inline struct hstate *folio_hstate(struct folio *folio)
872e51da3a9SSidhartha Kumar {
873e51da3a9SSidhartha Kumar 	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
874e51da3a9SSidhartha Kumar 	return size_to_hstate(folio_size(folio));
875e51da3a9SSidhartha Kumar }
876e51da3a9SSidhartha Kumar 
hstate_index_to_shift(unsigned index)877aa50d3a7SAndi Kleen static inline unsigned hstate_index_to_shift(unsigned index)
878aa50d3a7SAndi Kleen {
879aa50d3a7SAndi Kleen 	return hstates[index].order + PAGE_SHIFT;
880aa50d3a7SAndi Kleen }
881aa50d3a7SAndi Kleen 
hstate_index(struct hstate * h)882972dc4deSAneesh Kumar K.V static inline int hstate_index(struct hstate *h)
883972dc4deSAneesh Kumar K.V {
884972dc4deSAneesh Kumar K.V 	return h - hstates;
885972dc4deSAneesh Kumar K.V }
886972dc4deSAneesh Kumar K.V 
887c3114a84SAnshuman Khandual extern int dissolve_free_huge_page(struct page *page);
888082d5b6bSGerald Schaefer extern int dissolve_free_huge_pages(unsigned long start_pfn,
889c8721bbbSNaoya Horiguchi 				    unsigned long end_pfn);
890e693de18SAnshuman Khandual 
891161df60eSNaoya Horiguchi #ifdef CONFIG_MEMORY_FAILURE
8922ff6ceceSSidhartha Kumar extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
893161df60eSNaoya Horiguchi #else
folio_clear_hugetlb_hwpoison(struct folio * folio)8942ff6ceceSSidhartha Kumar static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
895161df60eSNaoya Horiguchi {
896161df60eSNaoya Horiguchi }
897161df60eSNaoya Horiguchi #endif
898161df60eSNaoya Horiguchi 
899c177c81eSNaoya Horiguchi #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
900e693de18SAnshuman Khandual #ifndef arch_hugetlb_migration_supported
arch_hugetlb_migration_supported(struct hstate * h)901e693de18SAnshuman Khandual static inline bool arch_hugetlb_migration_supported(struct hstate *h)
902e693de18SAnshuman Khandual {
90394310cbcSAnshuman Khandual 	if ((huge_page_shift(h) == PMD_SHIFT) ||
9049b553bf5SAnshuman Khandual 		(huge_page_shift(h) == PUD_SHIFT) ||
90594310cbcSAnshuman Khandual 			(huge_page_shift(h) == PGDIR_SHIFT))
90694310cbcSAnshuman Khandual 		return true;
90794310cbcSAnshuman Khandual 	else
90894310cbcSAnshuman Khandual 		return false;
909e693de18SAnshuman Khandual }
910c177c81eSNaoya Horiguchi #endif
911e693de18SAnshuman Khandual #else
arch_hugetlb_migration_supported(struct hstate * h)912e693de18SAnshuman Khandual static inline bool arch_hugetlb_migration_supported(struct hstate *h)
913e693de18SAnshuman Khandual {
914e693de18SAnshuman Khandual 	return false;
915e693de18SAnshuman Khandual }
916e693de18SAnshuman Khandual #endif
917e693de18SAnshuman Khandual 
hugepage_migration_supported(struct hstate * h)918e693de18SAnshuman Khandual static inline bool hugepage_migration_supported(struct hstate *h)
919e693de18SAnshuman Khandual {
920e693de18SAnshuman Khandual 	return arch_hugetlb_migration_supported(h);
92183467efbSNaoya Horiguchi }
922c8721bbbSNaoya Horiguchi 
9237ed2c31dSAnshuman Khandual /*
9247ed2c31dSAnshuman Khandual  * Movability check is different as compared to migration check.
9257ed2c31dSAnshuman Khandual  * It determines whether or not a huge page should be placed on
9267ed2c31dSAnshuman Khandual  * movable zone or not. Movability of any huge page should be
9277ed2c31dSAnshuman Khandual  * required only if huge page size is supported for migration.
92806c88398SZhen Lei  * There won't be any reason for the huge page to be movable if
9297ed2c31dSAnshuman Khandual  * it is not migratable to start with. Also the size of the huge
9307ed2c31dSAnshuman Khandual  * page should be large enough to be placed under a movable zone
9317ed2c31dSAnshuman Khandual  * and still feasible enough to be migratable. Just the presence
9327ed2c31dSAnshuman Khandual  * in movable zone does not make the migration feasible.
9337ed2c31dSAnshuman Khandual  *
9347ed2c31dSAnshuman Khandual  * So even though large huge page sizes like the gigantic ones
9357ed2c31dSAnshuman Khandual  * are migratable they should not be movable because its not
9367ed2c31dSAnshuman Khandual  * feasible to migrate them from movable zone.
9377ed2c31dSAnshuman Khandual  */
hugepage_movable_supported(struct hstate * h)9387ed2c31dSAnshuman Khandual static inline bool hugepage_movable_supported(struct hstate *h)
9397ed2c31dSAnshuman Khandual {
9407ed2c31dSAnshuman Khandual 	if (!hugepage_migration_supported(h))
9417ed2c31dSAnshuman Khandual 		return false;
9427ed2c31dSAnshuman Khandual 
9437ed2c31dSAnshuman Khandual 	if (hstate_is_gigantic(h))
9447ed2c31dSAnshuman Khandual 		return false;
9457ed2c31dSAnshuman Khandual 	return true;
9467ed2c31dSAnshuman Khandual }
9477ed2c31dSAnshuman Khandual 
948d92bbc27SJoonsoo Kim /* Movability of hugepages depends on migration support. */
htlb_alloc_mask(struct hstate * h)949d92bbc27SJoonsoo Kim static inline gfp_t htlb_alloc_mask(struct hstate *h)
950d92bbc27SJoonsoo Kim {
951d92bbc27SJoonsoo Kim 	if (hugepage_movable_supported(h))
952d92bbc27SJoonsoo Kim 		return GFP_HIGHUSER_MOVABLE;
953d92bbc27SJoonsoo Kim 	else
954d92bbc27SJoonsoo Kim 		return GFP_HIGHUSER;
955d92bbc27SJoonsoo Kim }
956d92bbc27SJoonsoo Kim 
95719fc7bedSJoonsoo Kim static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
95819fc7bedSJoonsoo Kim {
95919fc7bedSJoonsoo Kim 	gfp_t modified_mask = htlb_alloc_mask(h);
96019fc7bedSJoonsoo Kim 
96119fc7bedSJoonsoo Kim 	/* Some callers might want to enforce node */
96219fc7bedSJoonsoo Kim 	modified_mask |= (gfp_mask & __GFP_THISNODE);
96319fc7bedSJoonsoo Kim 
96441b4dc14SJoonsoo Kim 	modified_mask |= (gfp_mask & __GFP_NOWARN);
96541b4dc14SJoonsoo Kim 
96619fc7bedSJoonsoo Kim 	return modified_mask;
96719fc7bedSJoonsoo Kim }
96819fc7bedSJoonsoo Kim 
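/*
 * Example (editor's sketch, not part of the original header): a caller
 * that wants to restrict a hugetlb allocation to one node passes
 * __GFP_THISNODE through htlb_modify_alloc_mask(); gfp bits other than
 * __GFP_THISNODE and __GFP_NOWARN are filtered out.  The function name
 * below is hypothetical.
 */
static inline gfp_t example_thisnode_hugetlb_gfp(struct hstate *h)
{
	/* base mask (MOVABLE or not) plus the caller's node constraint */
	return htlb_modify_alloc_mask(h, __GFP_THISNODE | __GFP_NOWARN);
}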
969cb900f41SKirill A. Shutemov static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
970cb900f41SKirill A. Shutemov 					   struct mm_struct *mm, pte_t *pte)
971cb900f41SKirill A. Shutemov {
972cb900f41SKirill A. Shutemov 	if (huge_page_size(h) == PMD_SIZE)
973cb900f41SKirill A. Shutemov 		return pmd_lockptr(mm, (pmd_t *) pte);
974cb900f41SKirill A. Shutemov 	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
975cb900f41SKirill A. Shutemov 	return &mm->page_table_lock;
976cb900f41SKirill A. Shutemov }
977cb900f41SKirill A. Shutemov 
9782531c8cfSDominik Dingel #ifndef hugepages_supported
979457c1b27SNishanth Aravamudan /*
980457c1b27SNishanth Aravamudan  * Some platforms decide whether they support huge pages at boot
9812531c8cfSDominik Dingel  * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
9822531c8cfSDominik Dingel  * when there is no such support.
983457c1b27SNishanth Aravamudan  */
9842531c8cfSDominik Dingel #define hugepages_supported() (HPAGE_SHIFT != 0)
9852531c8cfSDominik Dingel #endif
986457c1b27SNishanth Aravamudan 
9875d317b2bSNaoya Horiguchi void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
9885d317b2bSNaoya Horiguchi 
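/*
 * Example (editor's sketch, not part of the original header): a
 * hypothetical reporting helper that bails out on platforms which set
 * HPAGE_SHIFT to 0 at boot, so nothing is printed when huge pages are
 * unsupported.
 */
static inline void example_report_mm_hugetlb(struct seq_file *m,
					     struct mm_struct *mm)
{
	if (hugepages_supported())
		hugetlb_report_usage(m, mm);
}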
98913db8c50SLiu Zixian static inline void hugetlb_count_init(struct mm_struct *mm)
99013db8c50SLiu Zixian {
99113db8c50SLiu Zixian 	atomic_long_set(&mm->hugetlb_usage, 0);
99213db8c50SLiu Zixian }
99313db8c50SLiu Zixian 
9945d317b2bSNaoya Horiguchi static inline void hugetlb_count_add(long l, struct mm_struct *mm)
9955d317b2bSNaoya Horiguchi {
9965d317b2bSNaoya Horiguchi 	atomic_long_add(l, &mm->hugetlb_usage);
9975d317b2bSNaoya Horiguchi }
9985d317b2bSNaoya Horiguchi 
9995d317b2bSNaoya Horiguchi static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
10005d317b2bSNaoya Horiguchi {
10015d317b2bSNaoya Horiguchi 	atomic_long_sub(l, &mm->hugetlb_usage);
10025d317b2bSNaoya Horiguchi }
1003e5251fd4SPunit Agrawal 
1004023bdd00SAneesh Kumar K.V #ifndef huge_ptep_modify_prot_start
1005023bdd00SAneesh Kumar K.V #define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
1006023bdd00SAneesh Kumar K.V static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
1007023bdd00SAneesh Kumar K.V 						unsigned long addr, pte_t *ptep)
1008023bdd00SAneesh Kumar K.V {
1009023bdd00SAneesh Kumar K.V 	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
1010023bdd00SAneesh Kumar K.V }
1011023bdd00SAneesh Kumar K.V #endif
1012023bdd00SAneesh Kumar K.V 
1013023bdd00SAneesh Kumar K.V #ifndef huge_ptep_modify_prot_commit
1014023bdd00SAneesh Kumar K.V #define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
1015023bdd00SAneesh Kumar K.V static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
1016023bdd00SAneesh Kumar K.V 						unsigned long addr, pte_t *ptep,
1017023bdd00SAneesh Kumar K.V 						pte_t old_pte, pte_t pte)
1018023bdd00SAneesh Kumar K.V {
1019935d4f0cSRyan Roberts 	unsigned long psize = huge_page_size(hstate_vma(vma));
1020935d4f0cSRyan Roberts 
1021935d4f0cSRyan Roberts 	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
1022023bdd00SAneesh Kumar K.V }
1023023bdd00SAneesh Kumar K.V #endif
1024023bdd00SAneesh Kumar K.V 
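/*
 * Example (editor's sketch, not part of the original header): the
 * start/commit pair brackets a protection change on a single huge PTE.
 * huge_pte_modify() is assumed from <asm-generic/hugetlb.h>; page table
 * locking and TLB flushing are omitted here for brevity.
 */
static inline void example_change_huge_pte_prot(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pgprot_t newprot)
{
	pte_t old_pte, pte;

	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);     /* clear entry */
	pte = huge_pte_modify(old_pte, newprot);                    /* apply newprot */
	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte); /* install */
}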
1025a4a00b45SMuchun Song #ifdef CONFIG_NUMA
1026a4a00b45SMuchun Song void hugetlb_register_node(struct node *node);
1027a4a00b45SMuchun Song void hugetlb_unregister_node(struct node *node);
1028a4a00b45SMuchun Song #endif
1029a4a00b45SMuchun Song 
1030b79f8eb4SJiaqi Yan /*
1031b79f8eb4SJiaqi Yan  * Check if a given raw @page in a hugepage is HWPOISON.
1032b79f8eb4SJiaqi Yan  */
1033b79f8eb4SJiaqi Yan bool is_raw_hwpoison_page_in_hugepage(struct page *page);
1034b79f8eb4SJiaqi Yan 
1035af73e4d9SNaoya Horiguchi #else	/* CONFIG_HUGETLB_PAGE */
1036a5516438SAndi Kleen struct hstate {};
1037442a5a9aSJason Gunthorpe 
1038345c62d1SSidhartha Kumar static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
1039345c62d1SSidhartha Kumar {
1040345c62d1SSidhartha Kumar 	return NULL;
1041345c62d1SSidhartha Kumar }
1042345c62d1SSidhartha Kumar 
1043ae37c7ffSOscar Salvador static inline int isolate_or_dissolve_huge_page(struct page *page,
1044ae37c7ffSOscar Salvador 						struct list_head *list)
1045369fa227SOscar Salvador {
1046369fa227SOscar Salvador 	return -ENOMEM;
1047369fa227SOscar Salvador }
1048369fa227SOscar Salvador 
1049d0ce0e47SSidhartha Kumar static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
1050442a5a9aSJason Gunthorpe 					   unsigned long addr,
1051442a5a9aSJason Gunthorpe 					   int avoid_reserve)
1052442a5a9aSJason Gunthorpe {
1053442a5a9aSJason Gunthorpe 	return NULL;
1054442a5a9aSJason Gunthorpe }
1055442a5a9aSJason Gunthorpe 
1056e37d3e83SSidhartha Kumar static inline struct folio *
1057e37d3e83SSidhartha Kumar alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
1058d92bbc27SJoonsoo Kim 			nodemask_t *nmask, gfp_t gfp_mask)
1059442a5a9aSJason Gunthorpe {
1060442a5a9aSJason Gunthorpe 	return NULL;
1061442a5a9aSJason Gunthorpe }
1062442a5a9aSJason Gunthorpe 
1063d0ce0e47SSidhartha Kumar static inline struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
1064442a5a9aSJason Gunthorpe 					       struct vm_area_struct *vma,
1065442a5a9aSJason Gunthorpe 					       unsigned long address)
1066442a5a9aSJason Gunthorpe {
1067442a5a9aSJason Gunthorpe 	return NULL;
1068442a5a9aSJason Gunthorpe }
1069442a5a9aSJason Gunthorpe 
1070442a5a9aSJason Gunthorpe static inline int __alloc_bootmem_huge_page(struct hstate *h)
1071442a5a9aSJason Gunthorpe {
1072442a5a9aSJason Gunthorpe 	return 0;
1073442a5a9aSJason Gunthorpe }
1074442a5a9aSJason Gunthorpe 
1075442a5a9aSJason Gunthorpe static inline struct hstate *hstate_file(struct file *f)
1076442a5a9aSJason Gunthorpe {
1077442a5a9aSJason Gunthorpe 	return NULL;
1078442a5a9aSJason Gunthorpe }
1079442a5a9aSJason Gunthorpe 
1080442a5a9aSJason Gunthorpe static inline struct hstate *hstate_sizelog(int page_size_log)
1081442a5a9aSJason Gunthorpe {
1082442a5a9aSJason Gunthorpe 	return NULL;
1083442a5a9aSJason Gunthorpe }
1084442a5a9aSJason Gunthorpe 
1085442a5a9aSJason Gunthorpe static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
1086442a5a9aSJason Gunthorpe {
1087442a5a9aSJason Gunthorpe 	return NULL;
1088442a5a9aSJason Gunthorpe }
1089442a5a9aSJason Gunthorpe 
1090e51da3a9SSidhartha Kumar static inline struct hstate *folio_hstate(struct folio *folio)
1091e51da3a9SSidhartha Kumar {
1092e51da3a9SSidhartha Kumar 	return NULL;
1093e51da3a9SSidhartha Kumar }
1094e51da3a9SSidhartha Kumar 
10952aff7a47SMatthew Wilcox (Oracle) static inline struct hstate *size_to_hstate(unsigned long size)
10962aff7a47SMatthew Wilcox (Oracle) {
10972aff7a47SMatthew Wilcox (Oracle) 	return NULL;
10982aff7a47SMatthew Wilcox (Oracle) }
10992aff7a47SMatthew Wilcox (Oracle) 
1100442a5a9aSJason Gunthorpe static inline unsigned long huge_page_size(struct hstate *h)
1101442a5a9aSJason Gunthorpe {
1102442a5a9aSJason Gunthorpe 	return PAGE_SIZE;
1103442a5a9aSJason Gunthorpe }
1104442a5a9aSJason Gunthorpe 
1105442a5a9aSJason Gunthorpe static inline unsigned long huge_page_mask(struct hstate *h)
1106442a5a9aSJason Gunthorpe {
1107442a5a9aSJason Gunthorpe 	return PAGE_MASK;
1108442a5a9aSJason Gunthorpe }
1109442a5a9aSJason Gunthorpe 
1110442a5a9aSJason Gunthorpe static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
1111442a5a9aSJason Gunthorpe {
1112442a5a9aSJason Gunthorpe 	return PAGE_SIZE;
1113442a5a9aSJason Gunthorpe }
1114442a5a9aSJason Gunthorpe 
1115442a5a9aSJason Gunthorpe static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
1116442a5a9aSJason Gunthorpe {
1117442a5a9aSJason Gunthorpe 	return PAGE_SIZE;
1118442a5a9aSJason Gunthorpe }
1119442a5a9aSJason Gunthorpe 
1120442a5a9aSJason Gunthorpe static inline unsigned int huge_page_order(struct hstate *h)
1121442a5a9aSJason Gunthorpe {
1122442a5a9aSJason Gunthorpe 	return 0;
1123442a5a9aSJason Gunthorpe }
1124442a5a9aSJason Gunthorpe 
1125442a5a9aSJason Gunthorpe static inline unsigned int huge_page_shift(struct hstate *h)
1126442a5a9aSJason Gunthorpe {
1127442a5a9aSJason Gunthorpe 	return PAGE_SHIFT;
1128442a5a9aSJason Gunthorpe }
1129442a5a9aSJason Gunthorpe 
113094310cbcSAnshuman Khandual static inline bool hstate_is_gigantic(struct hstate *h)
113194310cbcSAnshuman Khandual {
113294310cbcSAnshuman Khandual 	return false;
113394310cbcSAnshuman Khandual }
113494310cbcSAnshuman Khandual 
1135510a35d4SAndrea Righi static inline unsigned int pages_per_huge_page(struct hstate *h)
1136510a35d4SAndrea Righi {
1137510a35d4SAndrea Righi 	return 1;
1138510a35d4SAndrea Righi }
1139c3114a84SAnshuman Khandual 
1140c3114a84SAnshuman Khandual static inline unsigned hstate_index_to_shift(unsigned index)
1141c3114a84SAnshuman Khandual {
1142c3114a84SAnshuman Khandual 	return 0;
1143c3114a84SAnshuman Khandual }
1144c3114a84SAnshuman Khandual 
1145c3114a84SAnshuman Khandual static inline int hstate_index(struct hstate *h)
1146c3114a84SAnshuman Khandual {
1147c3114a84SAnshuman Khandual 	return 0;
1148c3114a84SAnshuman Khandual }
114913d60f4bSZhang Yi 
1150c3114a84SAnshuman Khandual static inline int dissolve_free_huge_page(struct page *page)
1151c3114a84SAnshuman Khandual {
1152c3114a84SAnshuman Khandual 	return 0;
1153c3114a84SAnshuman Khandual }
1154c3114a84SAnshuman Khandual 
1155c3114a84SAnshuman Khandual static inline int dissolve_free_huge_pages(unsigned long start_pfn,
1156c3114a84SAnshuman Khandual 					   unsigned long end_pfn)
1157c3114a84SAnshuman Khandual {
1158c3114a84SAnshuman Khandual 	return 0;
1159c3114a84SAnshuman Khandual }
1160c3114a84SAnshuman Khandual 
1161c3114a84SAnshuman Khandual static inline bool hugepage_migration_supported(struct hstate *h)
1162c3114a84SAnshuman Khandual {
1163c3114a84SAnshuman Khandual 	return false;
1164c3114a84SAnshuman Khandual }
1165cb900f41SKirill A. Shutemov 
11667ed2c31dSAnshuman Khandual static inline bool hugepage_movable_supported(struct hstate *h)
11677ed2c31dSAnshuman Khandual {
11687ed2c31dSAnshuman Khandual 	return false;
11697ed2c31dSAnshuman Khandual }
11707ed2c31dSAnshuman Khandual 
1171d92bbc27SJoonsoo Kim static inline gfp_t htlb_alloc_mask(struct hstate *h)
1172d92bbc27SJoonsoo Kim {
1173d92bbc27SJoonsoo Kim 	return 0;
1174d92bbc27SJoonsoo Kim }
1175d92bbc27SJoonsoo Kim 
117619fc7bedSJoonsoo Kim static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
117719fc7bedSJoonsoo Kim {
117819fc7bedSJoonsoo Kim 	return 0;
117919fc7bedSJoonsoo Kim }
118019fc7bedSJoonsoo Kim 
1181cb900f41SKirill A. Shutemov static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
1182cb900f41SKirill A. Shutemov 					   struct mm_struct *mm, pte_t *pte)
1183cb900f41SKirill A. Shutemov {
1184cb900f41SKirill A. Shutemov 	return &mm->page_table_lock;
1185cb900f41SKirill A. Shutemov }
11865d317b2bSNaoya Horiguchi 
118713db8c50SLiu Zixian static inline void hugetlb_count_init(struct mm_struct *mm)
118813db8c50SLiu Zixian {
118913db8c50SLiu Zixian }
119013db8c50SLiu Zixian 
11915d317b2bSNaoya Horiguchi static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
11925d317b2bSNaoya Horiguchi {
11935d317b2bSNaoya Horiguchi }
11945d317b2bSNaoya Horiguchi 
11955d317b2bSNaoya Horiguchi static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
11965d317b2bSNaoya Horiguchi {
11975d317b2bSNaoya Horiguchi }
1198e5251fd4SPunit Agrawal 
11995d4af619SBaolin Wang static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
12005d4af619SBaolin Wang 					  unsigned long addr, pte_t *ptep)
12015d4af619SBaolin Wang {
1202c33c7948SRyan Roberts #ifdef CONFIG_MMU
1203c33c7948SRyan Roberts 	return ptep_get(ptep);
1204c33c7948SRyan Roberts #else
12055d4af619SBaolin Wang 	return *ptep;
1206c33c7948SRyan Roberts #endif
12075d4af619SBaolin Wang }
12085d4af619SBaolin Wang 
12095d4af619SBaolin Wang static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
1210935d4f0cSRyan Roberts 				   pte_t *ptep, pte_t pte, unsigned long sz)
12115d4af619SBaolin Wang {
12125d4af619SBaolin Wang }
1213a4a00b45SMuchun Song 
1214a4a00b45SMuchun Song static inline void hugetlb_register_node(struct node *node)
1215a4a00b45SMuchun Song {
1216a4a00b45SMuchun Song }
1217a4a00b45SMuchun Song 
1218a4a00b45SMuchun Song static inline void hugetlb_unregister_node(struct node *node)
1219a4a00b45SMuchun Song {
1220a4a00b45SMuchun Song }
1221af73e4d9SNaoya Horiguchi #endif	/* CONFIG_HUGETLB_PAGE */
1222a5516438SAndi Kleen 
1223cb900f41SKirill A. Shutemov static inline spinlock_t *huge_pte_lock(struct hstate *h,
1224cb900f41SKirill A. Shutemov 					struct mm_struct *mm, pte_t *pte)
1225cb900f41SKirill A. Shutemov {
1226cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
1227cb900f41SKirill A. Shutemov 
1228cb900f41SKirill A. Shutemov 	ptl = huge_pte_lockptr(h, mm, pte);
1229cb900f41SKirill A. Shutemov 	spin_lock(ptl);
1230cb900f41SKirill A. Shutemov 	return ptl;
1231cb900f41SKirill A. Shutemov }
1232cb900f41SKirill A. Shutemov 
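/*
 * Example (editor's sketch, not part of the original header): the usual
 * split-lock pattern around a huge PTE access.  huge_ptep_get() is
 * assumed from <asm-generic/hugetlb.h>.
 */
static inline pte_t example_read_huge_pte(struct hstate *h,
					  struct mm_struct *mm, pte_t *ptep)
{
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, mm, ptep);	/* PMD split lock or mm->page_table_lock */
	entry = huge_ptep_get(ptep);		/* read the entry under the lock */
	spin_unlock(ptl);
	return entry;
}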
1233cf11e85fSRoman Gushchin #if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
1234cf11e85fSRoman Gushchin extern void __init hugetlb_cma_reserve(int order);
1235cf11e85fSRoman Gushchin #else
1236cf11e85fSRoman Gushchin static inline __init void hugetlb_cma_reserve(int order)
1237cf11e85fSRoman Gushchin {
1238cf11e85fSRoman Gushchin }
1239cf11e85fSRoman Gushchin #endif
1240cf11e85fSRoman Gushchin 
12413489dbb6SMike Kravetz #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
12423489dbb6SMike Kravetz static inline bool hugetlb_pmd_shared(pte_t *pte)
12433489dbb6SMike Kravetz {
12443489dbb6SMike Kravetz 	return page_count(virt_to_page(pte)) > 1;
12453489dbb6SMike Kravetz }
12463489dbb6SMike Kravetz #else
12473489dbb6SMike Kravetz static inline bool hugetlb_pmd_shared(pte_t *pte)
12483489dbb6SMike Kravetz {
12493489dbb6SMike Kravetz 	return false;
12503489dbb6SMike Kravetz }
12513489dbb6SMike Kravetz #endif
12523489dbb6SMike Kravetz 
1253c1991e07SPeter Xu bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
1254c1991e07SPeter Xu 
1255537cf30bSPeter Xu #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
1256537cf30bSPeter Xu /*
1257537cf30bSPeter Xu  * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
1258537cf30bSPeter Xu  * implement this.
1259537cf30bSPeter Xu  */
1260537cf30bSPeter Xu #define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
1261537cf30bSPeter Xu #endif
1262537cf30bSPeter Xu 
12639c67a207SPeter Xu static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
12649c67a207SPeter Xu {
12659c67a207SPeter Xu 	return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
12669c67a207SPeter Xu }
12679c67a207SPeter Xu 
1268*512b420aSMike Kravetz bool __vma_private_lock(struct vm_area_struct *vma);
1269bf491692SRik van Riel 
12709c67a207SPeter Xu /*
12719c67a207SPeter Xu  * Safe version of huge_pte_offset() to check the locks.  See comments
12729c67a207SPeter Xu  * above huge_pte_offset().
12739c67a207SPeter Xu  */
12749c67a207SPeter Xu static inline pte_t *
12759c67a207SPeter Xu hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
12769c67a207SPeter Xu {
12779c67a207SPeter Xu #if defined(CONFIG_HUGETLB_PAGE) && \
12789c67a207SPeter Xu 	defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
12799c67a207SPeter Xu 	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
12809c67a207SPeter Xu 
12819c67a207SPeter Xu 	/*
12829c67a207SPeter Xu 	 * If pmd sharing is possible, locking is needed to safely walk
12839c67a207SPeter Xu 	 * the hugetlb pgtables.  More information can be found in the
12849c67a207SPeter Xu 	 * comment above huge_pte_offset() in the same file.
12859c67a207SPeter Xu 	 *
12869c67a207SPeter Xu 	 * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
12879c67a207SPeter Xu 	 */
12889c67a207SPeter Xu 	if (__vma_shareable_lock(vma))
12899c67a207SPeter Xu 		WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
12909c67a207SPeter Xu 			     !lockdep_is_held(
12919c67a207SPeter Xu 				 &vma->vm_file->f_mapping->i_mmap_rwsem));
12929c67a207SPeter Xu #endif
12939c67a207SPeter Xu 	return huge_pte_offset(vma->vm_mm, addr, sz);
12949c67a207SPeter Xu }
12959c67a207SPeter Xu 
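/*
 * Example (editor's sketch, not part of the original header): a walk that
 * satisfies the lockdep check above by holding the per-VMA hugetlb lock
 * for reading around hugetlb_walk().  huge_pte_none() and huge_ptep_get()
 * are assumed from <asm-generic/hugetlb.h>.
 */
static inline bool example_huge_pte_present(struct vm_area_struct *vma,
					    unsigned long addr, unsigned long sz)
{
	pte_t *ptep;
	bool present = false;

	hugetlb_vma_lock_read(vma);		/* blocks pmd unsharing while we walk */
	ptep = hugetlb_walk(vma, addr, sz);
	if (ptep)
		present = !huge_pte_none(huge_ptep_get(ptep));
	hugetlb_vma_unlock_read(vma);
	return present;
}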
12961da177e4SLinus Torvalds #endif /* _LINUX_HUGETLB_H */
1297