page_alloc.c (e325c90ffc13b698fa2814102e05275b21c26bec) | page_alloc.c (748446bb6b5a9390b546af38ec899c868a9dbcf0) |
---|---|
1/* 2 * linux/mm/page_alloc.c 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie --- 1194 unchanged lines hidden (view full) --- 1203 split_page(virt_to_page(page[0].shadow), order); 1204#endif 1205 1206 for (i = 1; i < (1 << order); i++) 1207 set_page_refcounted(page + i); 1208} 1209 1210/* | 1/* 2 * linux/mm/page_alloc.c 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie --- 1194 unchanged lines hidden (view full) --- 1203 split_page(virt_to_page(page[0].shadow), order); 1204#endif 1205 1206 for (i = 1; i < (1 << order); i++) 1207 set_page_refcounted(page + i); 1208} 1209 1210/* |
/*
 * Similar to split_page except the page is already free. As this is only
 * being used for migration, the migratetype of the block also changes.
 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_page() after interrupts
 * are enabled.
 *
 * Returns the number of base pages split out (1 << order) on success,
 * or 0 if taking the page would drop the zone below its low watermark.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
int split_free_page(struct page *page)
{
	unsigned int order;
	unsigned long watermark;
	struct zone *zone;

	/* Caller must hand us a page that is still in the buddy allocator */
	BUG_ON(!PageBuddy(page));

	zone = page_zone(page);
	order = page_order(page);

	/*
	 * Obey watermarks as if the page was being allocated: refuse to
	 * split if removing 1 << order pages would leave the zone unable
	 * to satisfy an order-0 allocation at the low watermark.
	 */
	watermark = low_wmark_pages(zone) + (1 << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return 0;

	/* Remove page from free list */
	list_del(&page->lru);
	zone->free_area[order].nr_free--;
	rmv_page_order(page);
	/* The pages are no longer free; keep the zone's free-page stat honest */
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));

	/* Split into individual pages */
	set_page_refcounted(page);
	split_page(page, order);

	/*
	 * If the split covers one or more whole pageblocks, retag them
	 * MOVABLE so the area remains usable for migration targets.
	 * NOTE(review): the `- 1` means an order one below pageblock_order
	 * also qualifies — presumably because such a buddy is at least
	 * half a pageblock and aligned; confirm against pageblock_flags
	 * granularity.
	 */
	if (order >= pageblock_order - 1) {
		struct page *endpage = page + (1 << order) - 1;
		for (; page < endpage; page += pageblock_nr_pages)
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
	}

	return 1 << order;
}
|
1211 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 1212 * we cheat by calling it from here, in the order > 0 path. Saves a branch 1213 * or two. 1214 */ 1215static inline 1216struct page *buffered_rmqueue(struct zone *preferred_zone, 1217 struct zone *zone, int order, gfp_t gfp_flags, 1218 int migratetype) --- 4075 unchanged lines hidden --- | 1256 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 1257 * we cheat by calling it from here, in the order > 0 path. Saves a branch 1258 * or two. 1259 */ 1260static inline 1261struct page *buffered_rmqueue(struct zone *preferred_zone, 1262 struct zone *zone, int order, gfp_t gfp_flags, 1263 int migratetype) --- 4075 unchanged lines hidden --- |