--- hugetlb_vmemmap.c (b65d4adbc0f0d4619f61ee9d8126bc5005b78802)
+++ hugetlb_vmemmap.c (ad2fa3717b74994a22519dbe045757135db00dbb)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Free some vmemmap pages of HugeTLB
  *
  * Copyright (c) 2020, Bytedance. All rights reserved.
  *
  * Author: Muchun Song <songmuchun@bytedance.com>
  *
[... 171 unchanged lines hidden ...]
 #define RESERVE_VMEMMAP_NR		2U
 #define RESERVE_VMEMMAP_SIZE		(RESERVE_VMEMMAP_NR << PAGE_SHIFT)

 static inline unsigned long free_vmemmap_pages_size_per_hpage(struct hstate *h)
 {
         return (unsigned long)free_vmemmap_pages_per_hpage(h) << PAGE_SHIFT;
 }

+/*
+ * Previously discarded vmemmap pages will be allocated and remapping
+ * after this function returns zero.
+ */
+int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+        int ret;
+        unsigned long vmemmap_addr = (unsigned long)head;
+        unsigned long vmemmap_end, vmemmap_reuse;
+
+        if (!HPageVmemmapOptimized(head))
+                return 0;
+
+        vmemmap_addr += RESERVE_VMEMMAP_SIZE;
+        vmemmap_end = vmemmap_addr + free_vmemmap_pages_size_per_hpage(h);
+        vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
+        /*
+         * The pages which the vmemmap virtual address range [@vmemmap_addr,
+         * @vmemmap_end) are mapped to are freed to the buddy allocator, and
+         * the range is mapped to the page which @vmemmap_reuse is mapped to.
+         * When a HugeTLB page is freed to the buddy allocator, previously
+         * discarded vmemmap pages must be allocated and remapping.
+         */
+        ret = vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse,
+                                  GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
+
+        if (!ret)
+                ClearHPageVmemmapOptimized(head);
+
+        return ret;
+}
+
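alloc_huge_page_vmemmap() restores the vmemmap only for pages whose tail struct pages were actually remapped away (HPageVmemmapOptimized), and it can fail: vmemmap_remap_alloc() is called with __GFP_NORETRY | __GFP_THISNODE, so under memory pressure the node-local allocation may not succeed. A caller that wants to hand a HugeTLB page back to the buddy allocator therefore has to treat a non-zero return as "keep the page as a HugeTLB page for now". The sketch below is a hypothetical, simplified caller written for illustration only; example_dissolve_hugetlb_page() and keep_hugetlb_page() are invented names, while huge_page_order() and __free_pages() are assumed from the rest of the kernel, not from this file.

/*
 * Hypothetical caller sketch, not the real hugetlb.c path: the vmemmap
 * must be restored before the tail struct pages can be touched again,
 * so a failure means the page cannot be released right now.
 */
static void example_dissolve_hugetlb_page(struct hstate *h, struct page *head)
{
        if (alloc_huge_page_vmemmap(h, head)) {
                /* Invented placeholder: requeue the page and retry later. */
                keep_hugetlb_page(h, head);
                return;
        }

        /* Vmemmap is intact again; safe to return the page to buddy. */
        __free_pages(head, huge_page_order(h));
}

Because the restore attempt is deliberately kept cheap and node-local (__GFP_NORETRY, __GFP_THISNODE), a retry/keep path like the one above is part of the design rather than an afterthought.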
 void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
         unsigned long vmemmap_addr = (unsigned long)head;
         unsigned long vmemmap_end, vmemmap_reuse;

         if (!free_vmemmap_pages_per_hpage(h))
                 return;

         vmemmap_addr += RESERVE_VMEMMAP_SIZE;
         vmemmap_end = vmemmap_addr + free_vmemmap_pages_size_per_hpage(h);
         vmemmap_reuse = vmemmap_addr - PAGE_SIZE;

         /*
          * Remap the vmemmap virtual address range [@vmemmap_addr, @vmemmap_end)
          * to the page which @vmemmap_reuse is mapped to, then free the pages
          * which the range [@vmemmap_addr, @vmemmap_end] is mapped to.
          */
         vmemmap_remap_free(vmemmap_addr, vmemmap_end, vmemmap_reuse);
+
+        SetHPageVmemmapOptimized(head);
 }
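Both functions compute the same three addresses from the head struct page: the reused page sits immediately below the remapped range, and the remapped range covers everything past the reserved area. As a worked example under assumptions that do not come from this file (x86_64, 4 KiB base pages, sizeof(struct page) == 64, a 2 MiB HugeTLB page, hence 512 struct pages and 8 vmemmap pages of which RESERVE_VMEMMAP_NR = 2 are kept), the small user-space program below redoes the arithmetic; the head address and the "6 pages freed" figure are illustrative, since free_vmemmap_pages_per_hpage() is defined in the hidden part of the file.

#include <stdio.h>

/* Assumed configuration, not taken from hugetlb_vmemmap.c itself. */
#define PAGE_SHIFT              12UL
#define PAGE_SIZE               (1UL << PAGE_SHIFT)
#define STRUCT_PAGE_SIZE        64UL
#define PAGES_PER_HUGE_PAGE     512UL   /* 2 MiB / 4 KiB */
#define RESERVE_VMEMMAP_NR      2UL
#define RESERVE_VMEMMAP_SIZE    (RESERVE_VMEMMAP_NR << PAGE_SHIFT)

int main(void)
{
        /* Arbitrary illustrative vmemmap address of the head struct page. */
        unsigned long head = 0xffffea0004000000UL;

        /* 512 * 64 / 4096 = 8 vmemmap pages; 2 reserved, 6 freed. */
        unsigned long vmemmap_pages = PAGES_PER_HUGE_PAGE * STRUCT_PAGE_SIZE / PAGE_SIZE;
        unsigned long free_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;

        /* The same address arithmetic as alloc/free_huge_page_vmemmap(). */
        unsigned long vmemmap_addr  = head + RESERVE_VMEMMAP_SIZE;
        unsigned long vmemmap_end   = vmemmap_addr + (free_pages << PAGE_SHIFT);
        unsigned long vmemmap_reuse = vmemmap_addr - PAGE_SIZE;

        printf("vmemmap pages per 2 MiB HugeTLB page: %lu (%lu freed, %lu reserved)\n",
               vmemmap_pages, free_pages, RESERVE_VMEMMAP_NR);
        printf("remap range [%#lx, %#lx), reuse page at %#lx\n",
               vmemmap_addr, vmemmap_end, vmemmap_reuse);
        return 0;
}

The HPageVmemmapOptimized flag ties the two paths together: free_huge_page_vmemmap() sets it after discarding the range, and alloc_huge_page_vmemmap() returns early unless it is set, clearing it again once the pages have been reallocated and remapped.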