// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 * Author: Muchun Song <songmuchun@bytedance.com>
 */
#ifndef _LINUX_HUGETLB_VMEMMAP_H
#define _LINUX_HUGETLB_VMEMMAP_H
#include <linux/hugetlb.h>

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);

/*
 * Reserve one vmemmap page; all vmemmap addresses are mapped to it. See
 * Documentation/vm/vmemmap_dedup.rst.
 */
#define HUGETLB_VMEMMAP_RESERVE_SIZE	PAGE_SIZE

static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
{
	return pages_per_huge_page(h) * sizeof(struct page);
}

/*
 * Return how much vmemmap memory associated with a HugeTLB page can be
 * optimized away and freed back to the buddy allocator.
 */
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;

	if (!is_power_of_2(sizeof(struct page)))
		return 0;
	return size > 0 ? size : 0;
}
#else
static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
	return 0;
}

static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
}

static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */

static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
{
	return hugetlb_vmemmap_optimizable_size(h) != 0;
}
#endif /* _LINUX_HUGETLB_VMEMMAP_H */
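
/*
 * Worked example (a sketch, not part of the header; the concrete sizes below
 * are assumptions, not guarantees): assuming 4 KiB base pages, a 64-byte
 * struct page and a 2 MiB HugeTLB page, hugetlb_vmemmap_size() is
 * 512 * sizeof(struct page) = 32 KiB of vmemmap, i.e. eight base pages.
 * HUGETLB_VMEMMAP_RESERVE_SIZE keeps one of those pages mapped, so
 * hugetlb_vmemmap_optimizable_size() reports 32 KiB - 4 KiB = 28 KiB as
 * freeable per 2 MiB HugeTLB page, and hugetlb_vmemmap_optimizable()
 * returns true.
 */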