xref: /openbmc/linux/include/linux/hugetlb.h (revision 1802d0be)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format that supports
 * multiple hugepage sizes. For example, commit a4fe3ce76 ("powerpc/mm:
 * Allow more flexible layouts for hugepage pagetables") introduced this
 * on powerpc to allow a more flexible hugepage pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif
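
/*
 * Illustrative sketch (not part of this header): an architecture that
 * uses hugepage directories is expected to provide its own hugepd_t,
 * is_hugepd() and gup_huge_pd() before the fallback above is seen,
 * roughly along these lines (the marker bit used to tag a hugepd entry
 * is made up here; each architecture defines its own encoding):
 *
 *	typedef struct { unsigned long pd; } hugepd_t;
 *	#define HUGEPD_MARKER	(1UL << 0)
 *	#define is_hugepd(hpd)	((hpd).pd & HUGEPD_MARKER)
 *	#define __hugepd(x)	((hugepd_t) { (x) })
 */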

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
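
/*
 * Example (illustrative sketch): walking every registered huge page
 * size, e.g. to report the pool sizes:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free of %lu pages\n",
 *			h->name, h->free_huge_pages, h->nr_huge_pages);
 */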

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
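
/*
 * Example (illustrative sketch): hugetlbfs creates one subpool per
 * mount to cap and/or guarantee huge page usage for that mount. A
 * caller might do, assuming the default hstate:
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(&default_hstate, 16, 4);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);	// drops the creator's reference
 */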

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
				pgoff_t idx, unsigned long address);
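
/*
 * Example (illustrative sketch): fault paths serialize on a hashed
 * mutex so concurrent faults on the same (mapping, index) don't race:
 *
 *	u32 hash;
 *
 *	hash = hugetlb_fault_mutex_hash(h, mapping, idx, address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	// ... handle the fault or the page-cache insertion ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */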

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
					pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz)	0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define move_hugetlb_state(old, new, reason)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}
static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long address,
				unsigned int flags)
{
	BUG();
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an architecture supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);
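
/*
 * Example (illustrative sketch): mmap(MAP_HUGETLB) obtains its backing
 * file roughly like this, with page_size_log decoded from the mmap
 * flags (0 selects the default huge page size):
 *
 *	struct user_struct *user = NULL;
 *	struct file *file;
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				  &user, HUGETLB_ANONHUGE_INODE,
 *				  page_size_log);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */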

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

340 
341 #define HSTATE_NAME_LEN 32
342 /* Defines one hugetlb page size */
343 struct hstate {
344 	int next_nid_to_alloc;
345 	int next_nid_to_free;
346 	unsigned int order;
347 	unsigned long mask;
348 	unsigned long max_huge_pages;
349 	unsigned long nr_huge_pages;
350 	unsigned long free_huge_pages;
351 	unsigned long resv_huge_pages;
352 	unsigned long surplus_huge_pages;
353 	unsigned long nr_overcommit_huge_pages;
354 	struct list_head hugepage_activelist;
355 	struct list_head hugepage_freelists[MAX_NUMNODES];
356 	unsigned int nr_huge_pages_node[MAX_NUMNODES];
357 	unsigned int free_huge_pages_node[MAX_NUMNODES];
358 	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
359 #ifdef CONFIG_CGROUP_HUGETLB
360 	/* cgroup control files */
361 	struct cftype cgroup_files[5];
362 #endif
363 	char name[HSTATE_NAME_LEN];
364 };
365 
366 struct huge_bootmem_page {
367 	struct list_head list;
368 	struct hstate *hstate;
369 };
370 
371 struct page *alloc_huge_page(struct vm_area_struct *vma,
372 				unsigned long addr, int avoid_reserve);
373 struct page *alloc_huge_page_node(struct hstate *h, int nid);
374 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
375 				nodemask_t *nmask);
376 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
377 				unsigned long address);
378 struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
379 				     int nid, nodemask_t *nmask);
380 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
381 			pgoff_t idx);
382 
383 /* arch callback */
384 int __init __alloc_bootmem_huge_page(struct hstate *h);
385 int __init alloc_bootmem_huge_page(struct hstate *h);
386 
387 void __init hugetlb_bad_size(void);
388 void __init hugetlb_add_hstate(unsigned order);
389 struct hstate *size_to_hstate(unsigned long size);
390 
391 #ifndef HUGE_MAX_HSTATE
392 #define HUGE_MAX_HSTATE 1
393 #endif
394 
395 extern struct hstate hstates[HUGE_MAX_HSTATE];
396 extern unsigned int default_hstate_idx;
397 
398 #define default_hstate (hstates[default_hstate_idx])
399 
400 static inline struct hstate *hstate_inode(struct inode *i)
401 {
402 	return HUGETLBFS_SB(i->i_sb)->hstate;
403 }
404 
405 static inline struct hstate *hstate_file(struct file *f)
406 {
407 	return hstate_inode(file_inode(f));
408 }
409 
410 static inline struct hstate *hstate_sizelog(int page_size_log)
411 {
412 	if (!page_size_log)
413 		return &default_hstate;
414 
415 	return size_to_hstate(1UL << page_size_log);
416 }
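
/*
 * Worked example: a caller requesting 2 MB pages passes a
 * page_size_log of 21, so hstate_sizelog(21) looks up
 * size_to_hstate(1UL << 21) = size_to_hstate(2097152); passing 0
 * falls back to the default huge page size.
 */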

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
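
/*
 * Worked example, assuming x86-64 with 4 KB base pages (PAGE_SHIFT =
 * 12): a 2 MB hstate has order 9, so huge_page_size() = 4096 << 9 =
 * 2097152, huge_page_shift() = 21, pages_per_huge_page() = 512 and
 * blocks_per_huge_page() = 2097152 / 512 = 4096 (512-byte sectors).
 * With MAX_ORDER = 11, a 1 GB hstate (order 18) is gigantic while the
 * 2 MB one is not.
 */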

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}
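
/*
 * Worked example: a 2 MB huge page keeps page->index in huge-page
 * units, so the third such page in a file has index 2; its index in
 * PAGE_SIZE units is 2 * 512 = 1024, which is what __basepage_index()
 * computes for the compound case.
 */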

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is distinct from the migration check: it
 * determines whether a huge page should be placed in a movable zone.
 * Movability only matters for huge page sizes that support migration
 * in the first place; there is no reason for a huge page to be movable
 * if it is not migratable to begin with. The page must also be small
 * enough that migrating it out of a movable zone remains feasible;
 * mere presence in a movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because migrating them out
 * of a movable zone is not feasible.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide at boot time whether they support huge pages.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is no
 * such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
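
/*
 * Example (illustrative sketch): initialization paths bail out early
 * on such platforms, e.g.:
 *
 *	if (!hugepages_supported())
 *		return 0;	// skip sysfs/procfs hugepage setup
 */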

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
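
/*
 * Example (illustrative sketch): the modify_prot pair is a
 * start/commit protocol, so an architecture can batch the TLB work; a
 * protection change over one huge PTE looks roughly like:
 *
 *	pte_t old_pte, new_pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */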

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
#define alloc_huge_page_vma(h, vma, address) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
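
/*
 * Example (illustrative sketch): callers pair huge_pte_lock() with a
 * plain spin_unlock() on the returned lock:
 *
 *	spinlock_t *ptl;
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	// ... examine or update the huge PTE at ptep ...
 *	spin_unlock(ptl);
 */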

#endif /* _LINUX_HUGETLB_H */