--- page_alloc.c (b5810039a54e5babf428e9a1e89fc1940fabff11)
+++ page_alloc.c (4c21e2f2441dc5fbb957b030333f5a3f2d02dea7)
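Every hunk in this diff is the same mechanical substitution: direct reads and writes of page->private on the old side become page_private() and set_page_private() calls on the new side. A minimal sketch of what such accessors could look like follows, assuming they are thin wrappers around the same field; the real definitions belong to include/linux/mm.h and may store the word differently (for example inside a union), so this is an illustration rather than the actual macros.

/*
 * Hedged sketch, not taken from this diff: accessors assumed to be thin
 * wrappers over the very same page->private word they replace.
 */
#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

Funnelling every access through one pair of macros means the representation of the private word can change later without touching each caller again, which is presumably the point of the conversion.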
 /*
  * linux/mm/page_alloc.c
  *
  * Manages the free list, the system allocates free pages here.
  * Note that kmalloc() lives in slab.c
  *
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  * Swap reorganised 29.12.95, Stephen Tweedie
[... 140 unchanged lines hidden ...]
 	int nr_pages = 1 << order;

 	page[1].mapping = NULL;
 	page[1].index = order;
 	for (i = 0; i < nr_pages; i++) {
 		struct page *p = page + i;

 		SetPageCompound(p);
-		p->private = (unsigned long)page;
+		set_page_private(p, (unsigned long)page);
 	}
 }

 static void destroy_compound_page(struct page *page, unsigned long order)
 {
 	int i;
 	int nr_pages = 1 << order;

 	if (!PageCompound(page))
 		return;

 	if (page[1].index != order)
 		bad_page(__FUNCTION__, page);

 	for (i = 0; i < nr_pages; i++) {
 		struct page *p = page + i;

 		if (!PageCompound(p))
 			bad_page(__FUNCTION__, page);
-		if (p->private != (unsigned long)page)
+		if (page_private(p) != (unsigned long)page)
 			bad_page(__FUNCTION__, page);
 		ClearPageCompound(p);
 	}
 }
 #endif /* CONFIG_HUGETLB_PAGE */
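prep_compound_page() above records the order in page[1].index and points every constituent page's private word back at the head page, and destroy_compound_page() checks exactly those invariants before clearing them. Purely as an illustration of that bookkeeping (the helper names are hypothetical, not part of this file), any page inside a compound page can be traced back to its head and its size:

/* Hypothetical helpers, shown only to illustrate the bookkeeping above. */
static inline struct page *compound_head_of(struct page *p)
{
	/* every page's private word was pointed at the head page */
	return (struct page *)page_private(p);
}

static inline unsigned long compound_order_of(struct page *p)
{
	/* the order was stashed in the head's page[1].index */
	return compound_head_of(p)[1].index;
}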

 /*
  * function for dealing with page's order in buddy system.
  * zone->lock is already acquired when we use these.
  * So, we don't need atomic page->flags operations here.
  */
 static inline unsigned long page_order(struct page *page) {
-	return page->private;
+	return page_private(page);
 }

 static inline void set_page_order(struct page *page, int order) {
-	page->private = order;
+	set_page_private(page, order);
 	__SetPagePrivate(page);
 }

 static inline void rmv_page_order(struct page *page)
 {
 	__ClearPagePrivate(page);
-	page->private = 0;
+	set_page_private(page, 0);
 }
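The three helpers above implement the allocator's marking convention: a block sitting on a free list carries PG_private and its order in the private word, and both are cleared when the block is allocated or merged away. The sketch below shows how such a block might be parked on its free list; it is a hypothetical helper, and the zone, free_area, free_list, nr_free and lru fields are assumed from the kernel of this era rather than taken from the visible excerpt.

/* Hypothetical sketch: returning a block of 2^order pages to its free list. */
static inline void stash_free_block(struct zone *zone, struct page *page,
				    unsigned int order)
{
	set_page_order(page, order);	/* record the order, set PG_private */
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}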

 /*
  * Locate the struct page for both the matching buddy in our
  * pair (buddy1) and the combined O(n+1) page they form (page).
  *
  * 1) Any buddy B1 will have an order O twin B2 which satisfies
  * the following equation:
[... 23 unchanged lines hidden ...]
 }
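The comment above (its equations sit in the hidden lines) describes how a block, its buddy and their combined parent relate by index arithmetic: at order O the buddy of a block at index B1 is assumed to be B1 ^ (1 << O), and the combined order O+1 block to start at B1 & ~(1 << O). The helpers below are a hypothetical sketch of that arithmetic, not the elided functions themselves.

/* Hypothetical helpers illustrating the buddy index arithmetic. */
static inline unsigned long buddy_idx_of(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1UL << order);	/* flip the order-th bit */
}

static inline unsigned long combined_idx_of(unsigned long page_idx, unsigned int order)
{
	return page_idx & ~(1UL << order);	/* parent starts at the lower index */
}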

 /*
  * This function checks whether a page is free && is the buddy
  * we can do coalesce a page and its buddy if
  * (a) the buddy is free &&
  * (b) the buddy is on the buddy system &&
  * (c) a page and its buddy have the same order.
- * for recording page's order, we use page->private and PG_private.
+ * for recording page's order, we use page_private(page) and PG_private.
  *
  */
 static inline int page_is_buddy(struct page *page, int order)
 {
 	if (PagePrivate(page)           &&
 	    (page_order(page) == order) &&
 	     page_count(page) == 0)
 		return 1;
[... 9 unchanged lines hidden ...]
  * units of memory (here, pages), and each level above it describes
  * pairs of units from the levels below, hence, "buddies".
  * At a high level, all that happens here is marking the table entry
  * at the bottom level available, and propagating the changes upward
  * as necessary, plus some accounting needed to play nicely with other
  * parts of the VM system.
  * At each level, we keep a list of pages, which are heads of continuous
  * free pages of length of (1 << order) and marked with PG_Private.Page's
- * order is recorded in page->private field.
+ * order is recorded in page_private(page) field.
  * So when we are allocating or freeing one, we can derive the state of the
  * other. That is, if we allocate a small block, and both were
  * free, the remainder of the region must be split into blocks.
  * If a block is freed, and its buddy is also free, then this
  * triggers coalescing into a block of larger size.
  *
  * -- wli
  */
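The sketch below illustrates the coalescing walk this comment describes: keep absorbing the buddy while it is free at the same order, then let the caller re-tag the surviving block. It is a hedged reconstruction, not the file's own free path (that sits in the hidden lines); base and page_idx are assumed names for the zone's first struct page and the offset of the freed page within it, and the nr_free accounting and sanity checks of the real code are omitted.

/* Hypothetical sketch of merging a freed block upwards through the orders. */
static unsigned long coalesce_upwards(struct page *base, unsigned long page_idx,
				      unsigned int *order)
{
	while (*order < MAX_ORDER - 1) {
		unsigned long buddy_idx = page_idx ^ (1UL << *order);
		struct page *buddy = base + buddy_idx;

		if (!page_is_buddy(buddy, *order))
			break;				/* buddy busy or split: stop merging */

		/* buddy is free at the same order: absorb it */
		list_del(&buddy->lru);			/* off its free list */
		rmv_page_order(buddy);			/* clear PG_private and stored order */
		page_idx &= ~(1UL << *order);		/* merged block starts at the lower index */
		(*order)++;
	}
	/* caller would now set_page_order() the block and link it at *order */
	return page_idx;
}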
[... 182 unchanged lines hidden ...]
 			1 << PG_swapcache |
 			1 << PG_writeback |
 			1 << PG_reserved )))
 		bad_page(__FUNCTION__, page);

 	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
 			1 << PG_referenced | 1 << PG_arch_1 |
 			1 << PG_checked | 1 << PG_mappedtodisk);
-	page->private = 0;
+	set_page_private(page, 0);
 	set_page_refs(page, order);
 	kernel_map_pages(page, 1 << order, 1);
 }

 /*
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
  */
[... 2100 unchanged lines hidden ...]