page_alloc.c: 942baad211336efefb93a8369478888ab845c450 → 7a3b835371883558eb63e069d891bd87f562380d
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/mm/page_alloc.c
  *
  * Manages the free list, the system allocates free pages here.
  * Note that kmalloc() lives in slab.c
  *
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds

--- 368 unchanged lines hidden ---

 /*
  * During boot we initialize deferred pages on-demand, as needed, but once
  * page_alloc_init_late() has finished, the deferred pages are all initialized,
  * and we can permanently disable that path.
  */
 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
 
 /*
- * Calling kasan_free_pages() only after deferred memory initialization
+ * Calling kasan_poison_pages() only after deferred memory initialization
  * has completed. Poisoning pages during deferred memory init will greatly
  * lengthen the process and cause problem in large memory systems as the
  * deferred pages initialization is done with interrupt disabled.
  *
  * Assuming that there will be no reference to those newly initialized
  * pages before they are ever allocated, this should have no effect on
  * KASAN memory tracking as the poison will be properly inserted at page
  * allocation time. The only corner case is when pages are allocated by
  * on-demand allocation and then freed again before the deferred pages
  * initialization is done, but this is not likely to happen.
  */
-static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-                                                bool init, fpi_t fpi_flags)
+static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
 {
-        if (static_branch_unlikely(&deferred_pages))
-                return;
-        if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-                        (fpi_flags & FPI_SKIP_KASAN_POISON))
-                return;
-        kasan_free_pages(page, order, init);
+        return static_branch_unlikely(&deferred_pages) ||
+               (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+                (fpi_flags & FPI_SKIP_KASAN_POISON));
 }
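The hunk above replaces a helper that performed the poisoning itself with a pure predicate; the call sites now issue the kasan_free_pages()/kasan_poison_pages() call and merely ask whether to skip it, so one decision can serve both branches of free_pages_prepare(). Below is a minimal userspace sketch of that skip decision; the static key is modeled as a plain bool, and the FPI bit value is an assumption for illustration, not the kernel's definition.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int fpi_t;
#define FPI_SKIP_KASAN_POISON (1u << 2)         /* assumed bit; not the kernel's value */

static bool deferred_pages_enabled = true;      /* models the deferred_pages static key */
static const bool config_kasan_generic = false; /* models IS_ENABLED(CONFIG_KASAN_GENERIC) */

static bool should_skip_kasan_poison(fpi_t fpi_flags)
{
        /* Skip while deferred struct-page init may still be running... */
        if (deferred_pages_enabled)
                return true;
        /* ...or when the caller opted out (honored outside generic KASAN). */
        return !config_kasan_generic && (fpi_flags & FPI_SKIP_KASAN_POISON);
}

int main(void)
{
        printf("during boot:           %d\n", should_skip_kasan_poison(0));
        deferred_pages_enabled = false; /* what page_alloc_init_late() effectively does */
        printf("after boot, no flag:   %d\n", should_skip_kasan_poison(0));
        printf("after boot, skip flag: %d\n",
               should_skip_kasan_poison(FPI_SKIP_KASAN_POISON));
        return 0;
}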
 
 /* Returns true if the struct page for the pfn is uninitialised */
 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
 {
         int nid = early_pfn_to_nid(pfn);
 
         if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)

--- 34 unchanged lines hidden ---

         if ((nr_initialised > PAGES_PER_SECTION) &&
             (pfn & (PAGES_PER_SECTION - 1)) == 0) {
                 NODE_DATA(nid)->first_deferred_pfn = pfn;
                 return true;
         }
         return false;
 }
 #else
-static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-                                                bool init, fpi_t fpi_flags)
+static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
 {
-        if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-                        (fpi_flags & FPI_SKIP_KASAN_POISON))
-                return;
-        kasan_free_pages(page, order, init);
+        return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+                (fpi_flags & FPI_SKIP_KASAN_POISON));
 }
 
 static inline bool early_page_uninitialised(unsigned long pfn)
 {
         return false;
 }
 
 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)

--- 769 unchanged lines hidden ---

         }
         kasan_enable_current();
 }
 
 static __always_inline bool free_pages_prepare(struct page *page,
                         unsigned int order, bool check_free, fpi_t fpi_flags)
 {
         int bad = 0;
-        bool init;
+        bool skip_kasan_poison = should_skip_kasan_poison(fpi_flags);
 
         VM_BUG_ON_PAGE(PageTail(page), page);
 
         trace_mm_page_free(page, order);
 
         if (unlikely(PageHWPoison(page)) && !order) {
                 /*
                  * Do not let hwpoison pages hit pcplists/buddy

--- 52 unchanged lines hidden ---

         /*
          * As memory initialization might be integrated into KASAN,
          * kasan_free_pages and kernel_init_free_pages must be
          * kept together to avoid discrepancies in behavior.
          *
          * With hardware tag-based KASAN, memory tags must be set before the
          * page becomes unavailable via debug_pagealloc or arch_free_page.
          */
-        init = want_init_on_free();
-        if (init && !kasan_has_integrated_init())
-                kernel_init_free_pages(page, 1 << order);
-        kasan_free_nondeferred_pages(page, order, init, fpi_flags);
+        if (kasan_has_integrated_init()) {
+                if (!skip_kasan_poison)
+                        kasan_free_pages(page, order);
+        } else {
+                bool init = want_init_on_free();
+
+                if (init)
+                        kernel_init_free_pages(page, 1 << order);
+                if (!skip_kasan_poison)
+                        kasan_poison_pages(page, order, init);
+        }
 
         /*
          * arch_free_page() can make the page's contents inaccessible. s390
          * does this. So nothing which can access the page's contents should
          * happen after this.
          */
         arch_free_page(page, order);
 
         debug_pagealloc_unmap_pages(page, 1 << order);
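On the free path, the two KASAN modes now diverge explicitly: with integrated init (hardware tag-based KASAN), kasan_free_pages() sets tags and zeroes memory in one pass; otherwise the page is zeroed first and poisoned second, since zeroing a range already marked as freed could itself look like a use-after-free to KASAN. A compilable model of that branch follows; the names mirror the kernel's, but the stub bodies are illustrative assumptions, not the kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool kasan_has_integrated_init(void) { return true; } /* e.g. HW_TAGS */
static bool want_init_on_free(void) { return true; }         /* init_on_free=1 */

/* Stub: HW_TAGS retags the region; zeroing folds into the same pass. */
static void kasan_free_pages(void *page, size_t size)
{
        memset(page, 0, size);
}

/* Stub: generic/SW_TAGS mode would mark the range as freed in shadow. */
static void kasan_poison_pages(void *page, size_t size, bool init)
{
        (void)page; (void)size; (void)init;
}

static void free_path(void *page, size_t size, bool skip_kasan_poison)
{
        if (kasan_has_integrated_init()) {
                /* Tagging and init happen together, or not at all. */
                if (!skip_kasan_poison)
                        kasan_free_pages(page, size);
        } else {
                bool init = want_init_on_free();

                /* Zero before poisoning: poisoned memory must not be written. */
                if (init)
                        memset(page, 0, size);
                if (!skip_kasan_poison)
                        kasan_poison_pages(page, size, init);
        }
}

int main(void)
{
        char page[4096];

        free_path(page, sizeof(page), false);
        return 0;
}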

--- 989 unchanged lines hidden ---

         }
 
         return false;
 }
 
 inline void post_alloc_hook(struct page *page, unsigned int order,
                                 gfp_t gfp_flags)
 {
-        bool init;
-
         set_page_private(page, 0);
         set_page_refcounted(page);
 
         arch_alloc_page(page, order);
         debug_pagealloc_map_pages(page, 1 << order);
 
         /*
          * Page unpoisoning must happen before memory initialization.
          * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
          * allocations and the page unpoisoning code will complain.
          */
         kernel_unpoison_pages(page, 1 << order);
 
         /*
          * As memory initialization might be integrated into KASAN,
          * kasan_alloc_pages and kernel_init_free_pages must be
          * kept together to avoid discrepancies in behavior.
          */
-        init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
-        kasan_alloc_pages(page, order, init);
-        if (init && !kasan_has_integrated_init())
-                kernel_init_free_pages(page, 1 << order);
+        if (kasan_has_integrated_init()) {
+                kasan_alloc_pages(page, order, gfp_flags);
+        } else {
+                bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
+
+                kasan_unpoison_pages(page, order, init);
+                if (init)
+                        kernel_init_free_pages(page, 1 << order);
+        }
 
         set_page_owner(page, order, gfp_flags);
 }
 
 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
                                                         unsigned int alloc_flags)
 {
         post_alloc_hook(page, order, gfp_flags);
 
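The allocation path mirrors the free path: with integrated init, kasan_alloc_pages() now receives the gfp mask so the tagging pass can decide about zeroing itself; otherwise the page is unpoisoned first and then optionally zeroed. Note the guard !want_init_on_free(): a page wiped when it was freed need not be wiped again on allocation. A compilable sketch under the same stub conventions as the free-path model above; the __GFP_ZERO bit value here is an assumption.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

typedef unsigned int gfp_t;
#define __GFP_ZERO (1u << 8)    /* assumed bit; not the kernel's value */

static bool kasan_has_integrated_init(void) { return true; }
static bool want_init_on_free(void) { return false; }
static bool want_init_on_alloc(gfp_t gfp) { return gfp & __GFP_ZERO; }

/* Stub: HW_TAGS sets fresh tags, zeroing in the same pass if requested. */
static void kasan_alloc_pages(void *page, size_t size, gfp_t gfp)
{
        if (gfp & __GFP_ZERO)
                memset(page, 0, size);
}

/* Stub: generic/SW_TAGS mode would mark the range accessible in shadow. */
static void kasan_unpoison_pages(void *page, size_t size, bool init)
{
        (void)page; (void)size; (void)init;
}

static void alloc_path(void *page, size_t size, gfp_t gfp_flags)
{
        if (kasan_has_integrated_init()) {
                kasan_alloc_pages(page, size, gfp_flags);
        } else {
                /* Pages zeroed on free need not be zeroed again on alloc. */
                bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);

                /* Unpoison before writing: the page was poisoned while free. */
                kasan_unpoison_pages(page, size, init);
                if (init)
                        memset(page, 0, size);
        }
}

int main(void)
{
        char page[4096];

        alloc_path(page, sizeof(page), __GFP_ZERO);
        return 0;
}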

--- 6811 unchanged lines hidden ---