// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>

/*
 * struct page extension
 *
 * This is the feature to manage memory for extended data per page.
 *
 * Until now, we had to modify struct page itself to store extra data per
 * page, which requires rebuilding the kernel, a really time-consuming
 * process. Sometimes a rebuild is not even possible because of third party
 * module dependencies. Lastly, enlarging struct page could cause unwanted
 * changes in system behaviour.
 *
 * This feature is intended to overcome the problems mentioned above. It
 * allocates memory for extended data per page somewhere other than struct
 * page itself, and that memory is reached through the accessor functions
 * provided by this code. During the boot process, the core checks whether
 * allocating this huge chunk of memory is needed at all; if not, no memory
 * is allocated. With this advantage, the feature can be built into the
 * kernel by default, avoiding rebuilds and the problems described above.
 *
 * To make this work, clients can provide two callbacks. One is the need
 * callback, which is mandatory if the client wants to avoid useless memory
 * allocation at boot time. The other is the optional init callback, which
 * is used to do proper initialization after the memory has been allocated.
 *
 * The need callback decides whether the extended memory allocation is
 * needed or not. A user may have deactivated a feature for this boot, in
 * which case the extra memory would be unnecessary. To avoid allocating a
 * huge chunk of memory in that case, each client reports its need for
 * extra memory through the need callback. If any need callback returns
 * true, someone needs the extra memory and the page extension core
 * allocates it. If none of them return true, the memory isn't needed for
 * this boot and the core skips the allocation entirely, so no memory is
 * wasted.
 *
 * When a need callback returns true, page_ext checks whether the client
 * requests extra per-entry space via the size field in struct
 * page_ext_operations. If it is non-zero, that much extra space is added
 * to each page_ext entry and its offset is reported back to the client
 * through the offset field in struct page_ext_operations.
 *
 * The init callback is used to do proper initialization after page
 * extension is completely initialized. On sparse memory systems, this
 * extra memory is allocated some time later than the memmap, i.e. the
 * lifetime of the page extension memory is not the same as that of the
 * memmap for struct page. Therefore, clients can't store extra data until
 * page extension is initialized, even though pages may already be
 * allocated and in use. This could leave the per-page extra data in an
 * inconsistent state, so clients can use this callback to set it up
 * correctly.
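 *
 * As an illustration only (a hypothetical client, not part of this file),
 * a feature that wants one unsigned long of per-page data could register
 * itself roughly like this, where foo_boot_enabled stands for whatever
 * boot-time switch the feature uses and init_foo is its optional init
 * callback:
 *
 *	static bool need_foo(void)
 *	{
 *		return foo_boot_enabled;
 *	}
 *
 *	struct page_ext_operations foo_ops = {
 *		.size = sizeof(unsigned long),
 *		.need = need_foo,
 *		.init = init_foo,
 *	};
 *
 * Once &foo_ops is listed in page_ext_ops[] below, the feature reaches its
 * data through the reported offset:
 *
 *	struct page_ext *page_ext = lookup_page_ext(page);
 *	unsigned long *data;
 *
 *	if (page_ext)
 *		data = (void *)page_ext + foo_ops.offset;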
 */

static struct page_ext_operations *page_ext_ops[] = {
#ifdef CONFIG_DEBUG_PAGEALLOC
	&debug_guardpage_ops,
#endif
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
};

static unsigned long total_usage;
static unsigned long extra_mem;

static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = sizeof(struct page_ext) +
						extra_mem;
			extra_mem += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}

static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}

static unsigned long get_entry_size(void)
{
	return sizeof(struct page_ext) + extra_mem;
}

static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + get_entry_size() * index;
}

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}

static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Extra space is needed if the node range is not aligned to
	 * MAX_ORDER_NR_PAGES: when the page allocator's buddy algorithm
	 * checks a buddy's status, the range it touches can fall outside
	 * the exact node range.
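	 * Note also that lookup_page_ext() above indexes this table from
	 * node_start_pfn rounded down to a MAX_ORDER_NR_PAGES boundary, so
	 * the table has to cover the pages between that boundary and the
	 * real node start as well.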
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = get_entry_size() * nr_pages;

	base = memblock_alloc_try_nid(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}

void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (!section->page_ext)
		return NULL;
	return get_entry(section->page_ext, pfn);
}

static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	addr = vzalloc_node(size, nid);

	return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = get_entry_size() * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn * entry size),
	 * which does not point to the memory block allocated above and would
	 * therefore cause kmemleak false positives, so mark the block as not
	 * leaked.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to a section boundary, so a
	 * mask has to be applied before the calculation.
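	 * Storing "base - section_start_pfn * entry size" lets
	 * lookup_page_ext() call get_entry(section->page_ext, pfn) with an
	 * unadjusted pfn, since the section-start offset is already folded
	 * into the stored pointer.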
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - get_entry_size() * pfn;
	total_usage += table_size;
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_ext(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size;

		table_size = get_entry_size() * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		kmemleak_free(addr);
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	base = get_entry(ms->page_ext, pfn);
	free_page_ext(base);
	ms->page_ext = NULL;
}

static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == NUMA_NO_NODE) {
		/*
		 * In this case, "nid" already exists and contains valid
		 * memory. "start_pfn" passed to us is a pfn which is an
		 * argument for online_pages(), and it should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_ext(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}

static int __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
	return 0;
}

static int __meminit page_ext_callback(struct notifier_block *self,
				       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out of node pages are not initialized.  So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can be overlapping.
			 * We know some architectures can have a node layout
			 * such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
			cond_resched();
		}
	}
	hotplug_memory_notifier(page_ext_callback, 0);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif