page_alloc.c: diff between commits 84097518d1ecd2330f9488e4c2d09953a3340e74 (old) and 7835e98b2e3c66dba79cb0ff8ebb90a2fe030c29 (new); lines prefixed '-' are removed, '+' are added
/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie

--- 428 unchanged lines hidden ---

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
-		set_page_refs(page, 0);
+		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

-		set_page_refs(page, order);
+		set_page_refcounted(page);
		__free_pages(page, order);
	}
}
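
Both hunks above (and the prep_new_page() hunk further down) replace set_page_refs() with set_page_refcounted(), which gives a page known to be unreferenced its first reference. A minimal sketch of the helper's shape, assuming the mm/internal.h definition from this series (the sanity-check details are approximate):

	/* Sketch: turn an unreferenced page into one with a single reference. */
	static inline void set_page_refcounted(struct page *page)
	{
		BUG_ON(atomic_read(&page->_count));	/* caller must own an unreferenced page */
		set_page_count(page, 1);		/* hand out the first reference */
	}
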


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression

--- 51 unchanged lines hidden ---

 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_checked | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
-	set_page_refs(page, order);
+	set_page_refcounted(page);
	kernel_map_pages(page, 1 << order, 1);
	return 0;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */

--- 213 unchanged lines hidden ---

 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	BUG_ON(PageCompound(page));
	BUG_ON(!page_count(page));
-	for (i = 1; i < (1 << order); i++) {
-		BUG_ON(page_count(page + i));
-		set_page_count(page + i, 1);
-	}
+	for (i = 1; i < (1 << order); i++)
+		set_page_refcounted(page + i);
}
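
split_page() turns one order-N allocation into 1 << N independently freeable order-0 pages; the new set_page_refcounted() call folds the old BUG_ON/set_page_count pair into one helper. An illustrative caller, using the standard alloc_pages()/__free_page() API (grab_contig_pages is a hypothetical name, not kernel API):

	/* Hypothetical caller: get contiguous pages that can be freed one by one. */
	static struct page *grab_contig_pages(unsigned int order)
	{
		struct page *page = alloc_pages(GFP_KERNEL, order);

		if (!page)
			return NULL;
		split_page(page, order);	/* each of the 1 << order pages now has count 1 */
		return page;			/* later: __free_page(page + i) per page */
	}
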

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
 * we cheat by calling it from here, in the order > 0 path. Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zonelist *zonelist,

--- 996 unchanged lines hidden ---

	unsigned long end_pfn = start_pfn + size;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (!early_pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		set_page_links(page, zone, nid, pfn);
-		set_page_count(page, 1);
+		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
		if (!is_highmem_idx(zone))
			set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif

--- 956 unchanged lines hidden ---
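
The memmap initialisation hunk above is the flip side of the refcount helpers: pages being set up for the first time get their count initialised with init_page_count() rather than an open-coded set_page_count(page, 1). A sketch of that helper, assuming the include/linux/mm.h definition from this series:

	/*
	 * Sketch: set up the refcount of a page before it is handed to the
	 * page allocator for the first time (boot or memory hotplug).
	 */
	static inline void init_page_count(struct page *page)
	{
		atomic_set(&page->_count, 1);
	}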