xref: /openbmc/linux/include/linux/hugetlb.h (revision 9125f19b)
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
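
/*
 * Illustrative sketch (not part of the original header): a reservation
 * map is refcounted via its embedded kref, and resv_map_release() is
 * the release callback, so dropping the last reference frees the map:
 *
 *	struct resv_map *resv = resv_map_alloc();
 *
 *	if (!resv)
 *		return -ENOMEM;
 *	...
 *	kref_put(&resv->refs, resv_map_release);
 */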

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
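
/*
 * Example use of the iterator above, as a sketch of how callers walk
 * every registered huge page size:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: order %u\n", h->name, h->order);
 */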

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
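
/*
 * Sketch of the subpool lifecycle, assuming a hugetlbfs-mount-like
 * caller: create a subpool against an hstate with optional max/min
 * limits (-1 meaning "no limit"), and drop it with a put when done:
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */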

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
				struct vm_area_struct *vma,
				struct address_space *mapping,
				pgoff_t idx, unsigned long address);
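
/*
 * Sketch of the intended locking pattern (mirroring mm/hugetlb.c):
 * hash the faulting context to pick the mutex that serialises faults
 * on the same mapping and index:
 *
 *	u32 hash;
 *
 *	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */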

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format to support
 * multiple hugepage sizes. For example, commit a4fe3ce76 ("powerpc/mm:
 * Allow more flexible layouts for hugepage pagetables") introduced this
 * on powerpc, allowing a more flexible hugepage pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file, so shmfs accounting
	 * rules apply.
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	struct hugetlbfs_sb_info *hsb;
	hsb = HUGETLBFS_SB(i->i_sb);
	return hsb->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
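
/*
 * Illustrative sketch: a caller such as hugetlb_file_setup() receives
 * the huge page size encoded as log2(size) (0 meaning "use the default
 * size", e.g. via MAP_HUGE_SHIFT) and resolves it like this; the error
 * code is a hypothetical choice:
 *
 *	struct hstate *h = hstate_sizelog(page_size_log);
 *
 *	if (!h)
 *		return ERR_PTR(-ENODEV);
 */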

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
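
/*
 * Worked example for the helpers above, assuming 4K base pages and a
 * 2M huge page: order is 9, so huge_page_size() is 4096 << 9 = 2M,
 * pages_per_huge_page() is 1 << 9 = 512 base pages, and
 * blocks_per_huge_page() is 2M / 512 = 4096 sectors of 512 bytes.
 */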

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern void dissolve_free_huge_pages(unsigned long start_pfn,
				     unsigned long end_pfn);
static inline int hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	return huge_page_shift(h) == PMD_SHIFT;
#else
	return 0;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
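
/*
 * Sketch of the usual guard in init paths (hugetlbfs does something
 * along these lines): bail out early when the platform booted without
 * huge page support:
 *
 *	if (!hugepages_supported())
 *		return -ENOTSUPP;
 */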

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e)	do {} while (0)
#define hugepage_migration_supported(h)	0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
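
/*
 * Typical usage of the helper above, as a sketch: take the (possibly
 * split) lock covering a huge PTE, operate on the entry, then unlock
 * via the returned pointer:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *	...
 *	spin_unlock(ptl);
 */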

#endif /* _LINUX_HUGETLB_H */