// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 */
#ifndef _LINUX_HUGETLB_VMEMMAP_H
#define _LINUX_HUGETLB_VMEMMAP_H
#include <linux/hugetlb.h>

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
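/*
 * Entry points of HVO. hugetlb_vmemmap_optimize() remaps the vmemmap pages
 * of a HugeTLB page so that all but the reserved page can be returned to
 * the buddy allocator; hugetlb_vmemmap_restore() undoes this, returning 0
 * on success or a negative errno on failure (e.g. when the replacement
 * vmemmap pages cannot be allocated). See Documentation/vm/vmemmap_dedup.rst.
 */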
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);

/*
 * Reserve one vmemmap page; all vmemmap addresses are mapped to it. See
 * Documentation/vm/vmemmap_dedup.rst.
 */
#define HUGETLB_VMEMMAP_RESERVE_SIZE	PAGE_SIZE

static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
{
	return pages_per_huge_page(h) * sizeof(struct page);
}
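/*
 * Worked example (an illustrative assumption, not enforced by this header):
 * with 4 KiB base pages, 2 MiB HugeTLB pages and a 64-byte struct page,
 * pages_per_huge_page() is 512, so the vmemmap of one HugeTLB page is
 * 512 * 64 = 32 KiB, i.e. 8 base pages of struct page metadata.
 */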

/*
 * Return how much vmemmap memory associated with a HugeTLB page can be
 * optimized away and freed to the buddy allocator.
 */
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;

	if (!is_power_of_2(sizeof(struct page)))
		return 0;
	return size > 0 ? size : 0;
}
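/*
 * Continuing the example above: 32 KiB of vmemmap minus the single reserved
 * page (4 KiB) leaves 28 KiB, i.e. 7 of the 8 vmemmap pages, that HVO can
 * hand back to the buddy allocator. The is_power_of_2() check disables the
 * optimization entirely when struct page is not a power-of-2 size, since
 * the remapping scheme relies on struct pages not straddling vmemmap page
 * boundaries.
 */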
#else
static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
	return 0;
}

static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
}

static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */

static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
{
	return hugetlb_vmemmap_optimizable_size(h) != 0;
}
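/*
 * A minimal usage sketch (hypothetical caller, for illustration only):
 *
 *	if (hugetlb_vmemmap_optimizable(h))
 *		hugetlb_vmemmap_optimize(h, head);
 *	...
 *	if (hugetlb_vmemmap_restore(h, head))
 *		// restore failed (e.g. -ENOMEM); the page's vmemmap is still
 *		// in its optimized state and must not be used per-subpage.
 *
 * The !CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP stubs above make both calls
 * no-ops, so callers do not need their own #ifdefs.
 */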
#endif /* _LINUX_HUGETLB_VMEMMAP_H */