--- page_alloc.c (cc638f329ef605f5c2a57b87dd8e584e9d5f4c2f)
+++ page_alloc.c (8e57f8acbbd121ecfb0c9dc13b8b030f86c6bd3b)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/mm/page_alloc.c
  *
  * Manages the free list, the system allocates free pages here.
  * Note that kmalloc() lives in slab.c
  *
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds

--- 680 unchanged lines hidden (view full) ---

                 set_compound_head(p, page);
         }
         atomic_set(compound_mapcount_ptr(page), -1);
 }

 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;

-#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
-DEFINE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
-#else
+bool _debug_pagealloc_enabled_early __read_mostly
+                = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
+EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
-#endif
 EXPORT_SYMBOL(_debug_pagealloc_enabled);

 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

 static int __init early_debug_pagealloc(char *buf)
 {
-        bool enable = false;
-
-        if (kstrtobool(buf, &enable))
-                return -EINVAL;
-
-        if (enable)
-                static_branch_enable(&_debug_pagealloc_enabled);
-
-        return 0;
+        return kstrtobool(buf, &_debug_pagealloc_enabled_early);
 }
 early_param("debug_pagealloc", early_debug_pagealloc);

-static void init_debug_guardpage(void)
+void init_debug_pagealloc(void)
 {
         if (!debug_pagealloc_enabled())
                 return;

+        static_branch_enable(&_debug_pagealloc_enabled);
+
         if (!debug_guardpage_minorder())
                 return;

         static_branch_enable(&_debug_guardpage_enabled);
 }

 static int __init debug_guardpage_minorder_setup(char *buf)
 {

--- 448 unchanged lines hidden (view full) ---
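The hunk above replaces the old scheme, where the early_param handler flipped the static key directly, with a two-stage one: "debug_pagealloc=" is first parsed into a plain bool (_debug_pagealloc_enabled_early), and init_debug_pagealloc() later promotes that into the _debug_pagealloc_enabled static key, presumably because static keys cannot be toggled safely that early in boot. A minimal sketch of the same pattern, with purely illustrative names (my_debug_*), not the actual mm/ code:

/*
 * Illustrative sketch of the two-stage pattern above (names are made up,
 * this is not the actual mm/ code): parse a boot parameter into a plain
 * bool very early, then promote it into a static key once jump labels can
 * be patched.
 */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>

static bool my_debug_early __read_mostly;       /* written by early_param */
DEFINE_STATIC_KEY_FALSE(my_debug_key);          /* patched during init */

static int __init early_my_debug(char *buf)
{
        /* Only touches the plain bool; no static key work this early. */
        return kstrtobool(buf, &my_debug_early);
}
early_param("my_debug", early_my_debug);

/* Called once, later in boot, when static_branch_enable() is safe. */
void __init init_my_debug(void)
{
        if (my_debug_early)
                static_branch_enable(&my_debug_key);
}

/* Always-correct but slower check: a plain memory load. */
static inline bool my_debug_enabled(void)
{
        return my_debug_early;
}

/* Hot-path check: valid only after init_my_debug() has run. */
static inline bool my_debug_enabled_static(void)
{
        return static_branch_unlikely(&my_debug_key);
}

The plain-bool helper stays usable at any point in boot; the static-key helper only becomes meaningful once the init function has run.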
         kernel_poison_pages(page, 1 << order, 0);
         /*
          * arch_free_page() can make the page's contents inaccessible.  s390
          * does this.  So nothing which can access the page's contents should
          * happen after this.
          */
         arch_free_page(page, order);

-        if (debug_pagealloc_enabled())
+        if (debug_pagealloc_enabled_static())
                 kernel_map_pages(page, 1 << order, 0);

         kasan_free_nondeferred_pages(page, order);

         return true;
 }

 #ifdef CONFIG_DEBUG_VM

--- 4 unchanged lines hidden (view full) ---

  */
 static bool free_pcp_prepare(struct page *page)
 {
         return free_pages_prepare(page, 0, true);
 }

 static bool bulkfree_pcp_prepare(struct page *page)
 {
-        if (debug_pagealloc_enabled())
+        if (debug_pagealloc_enabled_static())
                 return free_pages_check(page);
         else
                 return false;
 }
 #else
 /*
  * With DEBUG_VM disabled, order-0 pages being freed are checked only when
  * moving from pcp lists to free list in order to reduce overhead. With
  * debug_pagealloc enabled, they are checked also immediately when being freed
  * to the pcp lists.
  */
 static bool free_pcp_prepare(struct page *page)
 {
-        if (debug_pagealloc_enabled())
+        if (debug_pagealloc_enabled_static())
                 return free_pages_prepare(page, 0, true);
         else
                 return free_pages_prepare(page, 0, false);
 }

 static bool bulkfree_pcp_prepare(struct page *page)
 {
         return free_pages_check(page);

--- 735 unchanged lines hidden (view full) ---
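The free-path hunks above switch the hot checks from debug_pagealloc_enabled() to debug_pagealloc_enabled_static(). The header side of that split is not part of this diff; based on the two symbols exported at the top of the file, it presumably looks roughly like the sketch below (assumed to live in include/linux/mm.h; the real declarations may differ):

/*
 * Presumed header counterparts (sketch only; not shown in this diff).
 * Assumes <linux/jump_label.h> and <linux/kconfig.h> are available.
 */
extern bool _debug_pagealloc_enabled_early;
DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);

/* Safe at any time, including before init_debug_pagealloc(): plain load. */
static inline bool debug_pagealloc_enabled(void)
{
        return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
                _debug_pagealloc_enabled_early;
}

/*
 * Hot-path variant: compiles down to a patched jump/no-op via the static
 * key, but only reflects the boot parameter once init_debug_pagealloc()
 * has enabled the key.
 */
static inline bool debug_pagealloc_enabled_static(void)
{
        if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
                return false;

        return static_branch_unlikely(&_debug_pagealloc_enabled);
}

Under that assumption, the plain-bool variant is what init code (and init_debug_pagealloc() itself) keeps using, while the static-key variant is reserved for hot paths such as free_pages_prepare(), free_pcp_prepare() and bulkfree_pcp_prepare().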
         /* Discard memblock private memory */
         memblock_discard();

         for_each_node_state(nid, N_MEMORY)
                 shuffle_free_memory(NODE_DATA(nid));

         for_each_populated_zone(zone)
                 set_zone_contiguous(zone);
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-        init_debug_guardpage();
-#endif
 }

 #ifdef CONFIG_CMA
 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
 void __init init_cma_reserved_pageblock(struct page *page)
 {
         unsigned i = pageblock_nr_pages;
         struct page *p = page;

--- 113 unchanged lines hidden (view full) ---

 #ifdef CONFIG_DEBUG_VM
 /*
  * With DEBUG_VM enabled, order-0 pages are checked for expected state when
  * being allocated from pcp lists. With debug_pagealloc also enabled, they are
  * also checked when pcp lists are refilled from the free lists.
  */
 static inline bool check_pcp_refill(struct page *page)
 {
-        if (debug_pagealloc_enabled())
+        if (debug_pagealloc_enabled_static())
                 return check_new_page(page);
         else
                 return false;
 }

 static inline bool check_new_pcp(struct page *page)
 {
         return check_new_page(page);

--- 5 unchanged lines hidden (view full) ---

  * enabled, they are also checked when being allocated from the pcp lists.
  */
 static inline bool check_pcp_refill(struct page *page)
 {
         return check_new_page(page);
 }
 static inline bool check_new_pcp(struct page *page)
 {
-        if (debug_pagealloc_enabled())
+        if (debug_pagealloc_enabled_static())
                 return check_new_page(page);
         else
                 return false;
 }
 #endif /* CONFIG_DEBUG_VM */

 static bool check_new_pages(struct page *page, unsigned int order)
 {

--- 10 unchanged lines hidden (view full) ---

 inline void post_alloc_hook(struct page *page, unsigned int order,
                                 gfp_t gfp_flags)
 {
         set_page_private(page, 0);
         set_page_refcounted(page);

         arch_alloc_page(page, order);
-        if (debug_pagealloc_enabled())
+        if (debug_pagealloc_enabled_static())
                 kernel_map_pages(page, 1 << order, 1);
         kasan_alloc_pages(page, order);
         kernel_poison_pages(page, 1 << order, 1);
         set_page_owner(page, order, gfp_flags);
 }

 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
                                                 unsigned int alloc_flags)

--- 6583 unchanged lines hidden ---
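post_alloc_hook() above is the allocation-side counterpart of the free path earlier in the diff: with CONFIG_DEBUG_PAGEALLOC, pages are unmapped from the kernel direct map when freed and mapped back when allocated, so stray accesses to free pages fault. A hypothetical wrapper showing just that guard pattern (illustrative names, not mm/ code):

/*
 * Hypothetical helpers (not from mm/), showing the guard pattern used in
 * free_pages_prepare() and post_alloc_hook() above: unmap freed pages from
 * the kernel direct map, map them back on allocation, and gate both on the
 * static-key check so the cost is a patched no-op when the feature is off.
 */
#include <linux/mm.h>

static void my_unmap_on_free(struct page *page, unsigned int order)
{
        if (debug_pagealloc_enabled_static())
                kernel_map_pages(page, 1 << order, 0); /* 0 = unmap */
}

static void my_map_on_alloc(struct page *page, unsigned int order)
{
        if (debug_pagealloc_enabled_static())
                kernel_map_pages(page, 1 << order, 1); /* 1 = map back */
}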