// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>

/*
 * struct page extension
 *
 * This feature manages memory for extended data per page.
 *
 * Without it, storing extra data per page means modifying struct page
 * itself, which requires rebuilding the kernel, a really time-consuming
 * process that is sometimes impossible due to third-party module
 * dependencies. On top of that, enlarging struct page can cause unwanted
 * changes in system behaviour.
 *
 * This feature is intended to overcome the problems mentioned above. It
 * allocates memory for extended per-page data somewhere other than struct
 * page itself, and that memory is reached through the accessor functions
 * provided by this code. During boot, it checks whether allocating a huge
 * chunk of memory is needed at all and, if not, avoids allocating any
 * memory. With this advantage, the feature can be included in the kernel
 * by default, avoiding rebuilds and the problems related to them.
 *
 * To help this work well, clients can provide two callbacks. One is the
 * need callback, which is mandatory if a user wants to avoid useless
 * memory allocation at boot time. The other, the init callback, is
 * optional and is used to do proper initialization after memory has been
 * allocated.
 *
 * The need callback decides whether the extended memory allocation is
 * needed. Sometimes users deactivate certain features for a given boot,
 * making the extra memory unnecessary. To avoid allocating a huge chunk
 * of memory in that case, each client expresses its need for extra memory
 * through its need callback. If any of the need callbacks returns true,
 * someone needs extra memory and the page extension core allocates it.
 * If none of them returns true, no memory is needed for this boot and the
 * core skips the allocation, so no memory is wasted.
 *
 * When a need callback returns true, page_ext checks whether the client
 * requests extra memory via the size field of struct page_ext_operations.
 * If it is non-zero, extra space is reserved in each page_ext entry and
 * its location is reported back to the client through the offset field of
 * struct page_ext_operations.
 *
 * The init callback is used to do proper initialization after page
 * extension is completely set up. On sparse memory systems, the memory
 * for page extension is allocated some time later than the memmap is; in
 * other words, the lifetime of page extension memory differs from that of
 * the memmap for struct page. Clients therefore cannot store extra data
 * until page extension is initialized, even though pages may already be
 * allocated and used freely, which could leave the per-page extra data in
 * an inadequate state. To prevent that, a client can use this callback to
 * initialize its state correctly.
 */
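
/*
 * For illustration only, a hypothetical client could hook in roughly as
 * sketched below. The "foo" names are invented for this sketch and do
 * not exist in the kernel; see page_owner for a real client.
 *
 *	static bool need_foo(void)
 *	{
 *		return foo_enabled;	(e.g. set by a boot parameter)
 *	}
 *
 *	struct page_ext_operations foo_ops = {
 *		.size = sizeof(struct foo_data),  (extra bytes per entry)
 *		.need = need_foo,
 *		.init = init_foo,		  (optional)
 *	};
 *
 * Once boot-time allocation has happened, foo's per-page data lives
 * foo_ops.offset bytes past the page's struct page_ext:
 *
 *	struct foo_data *data =
 *		(void *)lookup_page_ext(page) + foo_ops.offset;
 */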

#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
static bool need_page_idle(void)
{
	return true;
}
struct page_ext_operations page_idle_ops = {
	.need = need_page_idle,
};
#endif

static struct page_ext_operations *page_ext_ops[] = {
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
};

unsigned long page_ext_size = sizeof(struct page_ext);

static unsigned long total_usage;

static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = page_ext_size;
			page_ext_size += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}

static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}

#ifndef CONFIG_SPARSEMEM
void __init page_ext_init_flatmem_late(void)
{
	invoke_init_callbacks();
}
#endif

static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + page_ext_size * index;
}
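
/*
 * A note on the stride arithmetic above, with illustrative numbers:
 * entries are page_ext_size bytes apart, not sizeof(struct page_ext),
 * because each entry carries the clients' extra data right after the
 * struct itself. With two clients asking for 8 and 16 bytes of extra
 * space, page_ext_size would be sizeof(struct page_ext) + 8 + 16, and
 * entry i begins at base + page_ext_size * i.
 */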

#ifndef CONFIG_SPARSEMEM

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}

static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * We need extra space if the node range is not aligned to
	 * MAX_ORDER_NR_PAGES: when the page allocator's buddy algorithm
	 * checks a buddy's status, the range it examines can fall outside
	 * the exact node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = page_ext_size * nr_pages;

	base = memblock_alloc_try_nid(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}

void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (!section->page_ext)
		return NULL;
	return get_entry(section->page_ext, pfn);
}

static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	addr = vzalloc_node(size, nid);

	return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = page_ext_size * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn), which
	 * does not point to the memory block allocated above, so it would
	 * cause kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION, so we have to
	 * apply a mask for the calculation.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - page_ext_size * pfn;
	total_usage += table_size;
	return 0;
}
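
/*
 * A worked example of the offset trick above, with illustrative numbers:
 * assume PAGES_PER_SECTION is 0x8000 and the section spans pfns
 * 0x8000-0xffff. base points at the entry for pfn 0x8000, so storing
 * base - page_ext_size * 0x8000 in section->page_ext lets
 * get_entry(section->page_ext, pfn) reach the entry for any pfn in the
 * section without first subtracting the section's start pfn, which is
 * exactly what the sparsemem lookup_page_ext() above relies on.
 */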

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_ext(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size;

		table_size = page_ext_size * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		kmemleak_free(addr);
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	base = get_entry(ms->page_ext, pfn);
	free_page_ext(base);
	ms->page_ext = NULL;
}

static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == NUMA_NO_NODE) {
		/*
		 * In this case, "nid" already exists and contains valid
		 * memory. "start_pfn" passed to us is a pfn which is an
		 * argument for online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
		fail = init_section_page_ext(pfn, nid);
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}

static int __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
	return 0;
}

static int __meminit page_ext_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif
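
/*
 * For illustration of the section rounding in the hotplug paths above
 * (the numbers are assumed for this sketch): with PAGES_PER_SECTION
 * being 0x8000, a request covering pfns 0x9000-0x12fff is widened to
 * whole sections, SECTION_ALIGN_DOWN(0x9000) == 0x8000 and
 * SECTION_ALIGN_UP(0x13000) == 0x18000, so the sections starting at
 * pfns 0x8000 and 0x10000 each get a page_ext table.
 */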

void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and
		 * the page->flags of out-of-node pages are not initialized.
		 * So we scan [start_pfn, the last section-aligned pfn below
		 * end_pfn) here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfn ranges can overlap. We know some
			 * architectures can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
			cond_resched();
		}
	}
	hotplug_memory_notifier(page_ext_callback, 0);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif
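
/*
 * A rough sketch of the initialization flow implemented in this file
 * (the call sites live in init/main.c): on flatmem kernels,
 * page_ext_init_flatmem() allocates the per-node tables from memblock
 * early in boot, and the clients' init callbacks run later via
 * page_ext_init_flatmem_late(). On sparsemem kernels, page_ext_init()
 * runs once the buddy allocator is usable (alloc_page_ext() allocates
 * with GFP_KERNEL), allocates a table per section, registers the memory
 * hotplug notifier and then invokes the init callbacks.
 */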