/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;
struct node;

#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd)	(0)
#define __hugepd(x)	((hugepd_t) { (x) })
#endif

void free_huge_folio(struct folio *folio);

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For HugeTLB page, there are more metadata to save in the struct page. But
 * the head struct page cannot meet our needs, so we have to abuse other tail
 * struct page to store the metadata.
 */
#define __NR_USED_SUBPAGE 3

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
	struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};

struct hugetlb_vma_lock {
	struct kref refs;
	struct rw_semaphore rw_sema;
	struct vm_area_struct *vma;
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
					      long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
				      unsigned long address, unsigned int flags,
				      unsigned int *page_mask);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *,
			  zap_flags_t);
void __unmap_hugepage_range(struct mmu_gather *tlb,
			    struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     uffd_flags_t flags,
			     struct folio **foliop);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
			   struct vm_area_struct *vma,
			   vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool isolate_hugetlb(struct folio *folio, struct list_head *list);
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
			       bool *migratable_cleared);
void folio_putback_active_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

#ifndef CONFIG_HIGHPTE
/*
 * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
 * which may go down to the lowest PTE level in their huge_pte_offset() and
 * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
 */
static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}
static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
				    unsigned long address)
{
	return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
}
#endif
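
/*
 * Illustrative sketch (an assumption, not code from any particular port): how
 * an architecture whose smallest huge page is PMD-sized might use the helpers
 * above inside its huge_pte_alloc() implementation, falling through to
 * pte_alloc_huge() only for sub-PMD huge page sizes:
 *
 *	pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 *			      unsigned long addr, unsigned long sz)
 *	{
 *		pgd_t *pgd = pgd_offset(mm, addr);
 *		p4d_t *p4d = p4d_alloc(mm, pgd, addr);
 *		pud_t *pud = p4d ? pud_alloc(mm, p4d, addr) : NULL;
 *		pmd_t *pmd = pud ? pmd_alloc(mm, pud, addr) : NULL;
 *
 *		if (!pmd)
 *			return NULL;
 *		if (sz == PMD_SIZE)
 *			return (pte_t *)pmd;
 *		return pte_alloc_huge(mm, pmd, addr);
 *	}
 */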

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz);
/*
 * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
 * Returns the pte_t* if found, or NULL if the address is not mapped.
 *
 * IMPORTANT: we should normally not directly call this function, instead
 * this is only a common interface to implement arch-specific
 * walker. Please use hugetlb_walk() instead, because that will attempt to
 * verify the locking for you.
 *
 * Since this function will walk all the pgtable pages (including not only
 * high-level pgtable page, but also PUD entry that can be unshared
 * concurrently for VM_SHARED), the caller of this function should be
 * responsible for its thread safety.  One can follow this rule:
 *
 * (1) For private mappings: pmd unsharing is not possible, so holding the
 *     mmap_lock for either read or write is sufficient. Most callers
 *     already hold the mmap_lock, so normally, no special action is
 *     required.
 *
 * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
 *     pgtable page can go away from under us!  It can be done by a pmd
 *     unshare with a follow up munmap() on the other process), then we
 *     need either:
 *
 *     (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
 *           won't happen upon the range (it also makes sure the pte_t we
 *           read is the right and stable one), or,
 *
 *     (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
 *           sure even if unshare happened the racy unmap() will wait until
 *           i_mmap_rwsem is released.
 *
 * Option (2.1) is the safest, which guarantees pte stability from pmd
 * sharing pov, until the vma lock released.  Option (2.2) doesn't protect
 * a concurrent pmd unshare, but it makes sure the pgtable page is safe to
 * access.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
		     unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
					  unsigned long *start, unsigned long *end);
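
/*
 * Illustrative sketch of locking rule (2.1) above: a caller that already
 * holds the mmap_lock and wants a stable pte_t for a shared mapping takes
 * the hugetlb vma lock around the walk.  This only shows the expected
 * calling pattern; it is not a helper provided by this header.
 *
 *	hugetlb_vma_lock_read(vma);
 *	ptep = hugetlb_walk(vma, addr & huge_page_mask(h), huge_page_size(h));
 *	if (ptep)
 *		entry = huge_ptep_get(ptep);
 *	hugetlb_vma_unlock_read(vma);
 */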

extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
				unsigned long *begin, unsigned long *end);
extern void __hugetlb_zap_end(struct vm_area_struct *vma,
			      struct zap_details *details);

static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
				     unsigned long *start, unsigned long *end)
{
	if (is_vm_hugetlb_page(vma))
		__hugetlb_zap_begin(vma, start, end);
}

static inline void hugetlb_zap_end(struct vm_area_struct *vma,
				   struct zap_details *details)
{
	if (is_vm_hugetlb_page(vma))
		__hugetlb_zap_end(vma, details);
}

void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
long hugetlb_change_protection(struct vm_area_struct *vma,
			       unsigned long address, unsigned long end,
			       pgprot_t newprot, unsigned long cp_flags);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_begin(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_end(
				struct vm_area_struct *vma,
				struct zap_details *details)
{
}

static inline struct page *hugetlb_follow_page_mask(
	struct vm_area_struct *vma, unsigned long address, unsigned int flags,
	unsigned int *page_mask)
{
	BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE */
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
					  struct mm_struct *src,
					  struct vm_area_struct *dst_vma,
					  struct vm_area_struct *src_vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}

static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}

static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	return 1;
}

static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
					   struct vm_area_struct *dst_vma,
					   unsigned long dst_addr,
					   unsigned long src_addr,
					   uffd_flags_t flags,
					   struct folio **foliop)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
				     unsigned long sz)
{
	return NULL;
}

static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
	return false;
}

static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
	return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					     bool *migratable_cleared)
{
	return 0;
}

static inline void folio_putback_active_hugetlb(struct folio *folio)
{
}

static inline void move_hugetlb_state(struct folio *old_folio,
				      struct folio *new_folio, int reason)
{
}

static inline long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
{
	return 0;
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page,
			zap_flags_t zap_flags)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		   int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time.  Cleared when page is fully instantiated.  Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization:  Examined or modified by code that knows it has
 *	the only reference to page.  i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables.  Indicates the page is a candidate for
 *	migration.
 *	Synchronization:  Initially set after new page allocation with no
 *	locking.  When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator.  Typically used for migration target pages when no pages
 *	are available in the pool.  The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has only reference.  All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *	that is not tracked by raw_hwp_page list.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	HPG_raw_hwp_unreliable,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static __always_inline						\
bool folio_test_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		return test_bit(HPG_##flname, private);	\
	}							\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_set_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		set_bit(HPG_##flname, private);			\
	}							\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_clear_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		clear_bit(HPG_##flname, private);		\
	}							\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline bool						\
folio_test_hugetlb_##flname(struct folio *folio)		\
	{ return 0; }						\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void						\
folio_set_hugetlb_##flname(struct folio *folio)		\
	{ }							\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void						\
folio_clear_hugetlb_##flname(struct folio *folio)		\
	{ }							\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
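
/*
 * Illustrative sketch of the helpers generated by HPAGEFLAG() above (an
 * assumed usage pattern, not code lifted from mm/hugetlb.c): a freshly
 * allocated migration target can be marked temporary and tested later on the
 * free path, e.g.:
 *
 *	folio_set_hugetlb_temporary(folio);
 *	...
 *	if (folio_test_hugetlb_temporary(folio))
 *		...	(release it straight back to the buddy allocator)
 */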

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	struct lock_class_key resize_key;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[8];
	struct cftype cgroup_files_legacy[10];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
				  unsigned long addr, int avoid_reserve);
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
					   nodemask_t *nmask, gfp_t gfp_mask);
struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
				      unsigned long address);
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
			      pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
			      unsigned long address, struct folio *folio);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
	return folio->_hugetlb_subpool;
}

static inline void hugetlb_set_folio_subpool(struct folio *folio,
					     struct hugepage_subpool *subpool)
{
	folio->_hugetlb_subpool = subpool;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	if (page_size_log < BITS_PER_LONG)
		return size_to_hstate(1UL << page_size_log);

	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(const struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) > MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif

static inline struct hstate *folio_hstate(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	return size_to_hstate(folio_size(folio));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_MEMORY_FAILURE
extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
#else
static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
}
#endif

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
	    (huge_page_shift(h) == PUD_SHIFT) ||
	    (huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in
 * the movable zone. Movability of any huge page should be
 * required only if the huge page size is supported for migration.
 * There won't be any reason for the huge page to be movable if
 * it is not migratable to start with. Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable. Just the presence
 * in the movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable they should not be movable because it's not
 * feasible to migrate them from the movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	unsigned long psize = huge_page_size(hstate_vma(vma));

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
#endif
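
/*
 * Illustrative sketch of the modify_prot start/commit pattern above, loosely
 * modelled on what a protection-change loop might do per huge PTE.  This is
 * an assumed calling sequence for the sake of the example, not the actual
 * mm/hugetlb.c implementation:
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 *	spin_unlock(ptl);
 */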

#ifdef CONFIG_NUMA
void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif

/*
 * Check if a given raw @page in a hugepage is HWPOISON.
 */
bool is_raw_hwpoison_page_in_hugepage(struct page *page);

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
						unsigned long addr,
						int avoid_reserve)
{
	return NULL;
}

static inline struct folio *
alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
			     nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
						    struct vm_area_struct *vma,
						    unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *folio_hstate(struct folio *folio)
{
	return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_MMU
	return ptep_get(ptep);
#else
	return *ptep;
#endif
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte, unsigned long sz)
{
}

static inline void hugetlb_register_node(struct node *node)
{
}

static inline void hugetlb_unregister_node(struct node *node)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
#endif

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return page_count(virt_to_page(pte)) > 1;
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return false;
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}

bool __vma_private_lock(struct vm_area_struct *vma);

/*
 * Safe version of huge_pte_offset() to check the locks.  See comments
 * above huge_pte_offset().
 */
static inline pte_t *
hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
{
#if defined(CONFIG_HUGETLB_PAGE) && \
	defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

	/*
	 * If pmd sharing is possible, locking is needed to safely walk the
	 * hugetlb pgtables.  More information can be found at the comment
	 * above huge_pte_offset() in the same file.
	 *
	 * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
	 */
	if (__vma_shareable_lock(vma))
		WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
			     !lockdep_is_held(
				 &vma->vm_file->f_mapping->i_mmap_rwsem));
#endif
	return huge_pte_offset(vma->vm_mm, addr, sz);
}

#endif /* _LINUX_HUGETLB_H */