compaction.c (53f724b2432a9f97a941251772f2b0d195e2d282 -> 7f354a548d1cb6bb01b6ee74aee9264aa152f1ec)
1/*
2 * linux/mm/compaction.c
3 *
4 * Memory compaction for the reduction of external fragmentation. Note that
5 * this heavily depends upon page migration to do all the real heavy
6 * lifting
7 *
8 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>

--- 534 unchanged lines hidden ---

543 */
544 if (strict && blockpfn < end_pfn)
545 total_isolated = 0;
546
547 /* Update the pageblock-skip if the whole pageblock was scanned */
548 if (blockpfn == end_pfn)
549 update_pageblock_skip(cc, valid_page, total_isolated, false);
550
-551 count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
+551 cc->total_free_scanned += nr_scanned;
552 if (total_isolated)
553 count_compact_events(COMPACTISOLATED, total_isolated);
554 return total_isolated;
555}
556
557/**
558 * isolate_freepages_range() - isolate free pages.
559 * @start_pfn: The first PFN to start isolating.

--- 366 unchanged lines hidden ---

926 * if the whole pageblock was scanned without isolating any page.
927 */
928 if (low_pfn == end_pfn)
929 update_pageblock_skip(cc, valid_page, nr_isolated, true);
930
931 trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
932 nr_scanned, nr_isolated);
933
-934 count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
+934 cc->total_migrate_scanned += nr_scanned;
935 if (nr_isolated)
936 count_compact_events(COMPACTISOLATED, nr_isolated);
937
938 return low_pfn;
939}
940
941/**
942 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range

--- 683 unchanged lines hidden ---

1626 /*
1627 * Only go back, not forward. The cached pfn might have been
1628 * already reset to zone end in compact_finished()
1629 */
1630 if (free_pfn > zone->compact_cached_free_pfn)
1631 zone->compact_cached_free_pfn = free_pfn;
1632 }
1633
+1634 count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
+1635 count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
+1636
1637 trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
1638 cc->free_pfn, end_pfn, sync, ret);
1639
1640 return ret;
1641}
1642
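(Aside: a minimal stand-alone sketch of the accounting pattern the hunks above introduce: scanner work is accumulated in the per-compaction control structure and flushed to the global event counters once per compact_zone() run. The stub counter array and the trimmed-down struct below are illustrative assumptions, not the kernel's definitions.)

#include <stdio.h>

/* Illustrative stand-ins for the vmstat event counters. */
enum compact_event { COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED, NR_COMPACT_EVENTS };
static unsigned long vm_events[NR_COMPACT_EVENTS];

static void count_compact_events(enum compact_event ev, unsigned long delta)
{
	vm_events[ev] += delta;	/* the kernel bumps per-cpu vmstat counters here */
}

/* Trimmed-down model of struct compact_control: only the new totals. */
struct compact_control {
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
};

/* Per-pageblock scan: only the private per-compaction totals are updated. */
static void scan_one_pageblock(struct compact_control *cc,
			       unsigned long migrate_scanned,
			       unsigned long free_scanned)
{
	cc->total_migrate_scanned += migrate_scanned;
	cc->total_free_scanned += free_scanned;
}

/* End of one zone compaction run: flush the totals exactly once. */
static void finish_compact_zone(struct compact_control *cc)
{
	count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
	count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
}

int main(void)
{
	struct compact_control cc = { .total_migrate_scanned = 0, .total_free_scanned = 0 };

	scan_one_pageblock(&cc, 32, 512);
	scan_one_pageblock(&cc, 16, 128);
	finish_compact_zone(&cc);

	printf("COMPACTMIGRATE_SCANNED=%lu COMPACTFREE_SCANNED=%lu\n",
	       vm_events[COMPACTMIGRATE_SCANNED], vm_events[COMPACTFREE_SCANNED]);
	return 0;
}

Compared with the removed per-pageblock count_compact_events() calls at old lines 551 and 934, the global counters are now touched once per compaction run rather than once per scanned pageblock.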
1643static enum compact_result compact_zone_order(struct zone *zone, int order,
1644 gfp_t gfp_mask, enum compact_priority prio,
1645 unsigned int alloc_flags, int classzone_idx)
1646{
1647 enum compact_result ret;
1648 struct compact_control cc = {
1649 .nr_freepages = 0,
1650 .nr_migratepages = 0,
+1651 .total_migrate_scanned = 0,
+1652 .total_free_scanned = 0,
1653 .order = order,
1654 .gfp_mask = gfp_mask,
1655 .zone = zone,
1656 .mode = (prio == COMPACT_PRIO_ASYNC) ?
1657 MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
1658 .alloc_flags = alloc_flags,
1659 .classzone_idx = classzone_idx,
1660 .direct_compaction = true,

--- 96 unchanged lines hidden ---

1757/* Compact all zones within a node */
1758static void compact_node(int nid)
1759{
1760 pg_data_t *pgdat = NODE_DATA(nid);
1761 int zoneid;
1762 struct zone *zone;
1763 struct compact_control cc = {
1764 .order = -1,
+1765 .total_migrate_scanned = 0,
+1766 .total_free_scanned = 0,
1767 .mode = MIGRATE_SYNC,
1768 .ignore_skip_hint = true,
1769 .whole_zone = true,
1770 .gfp_mask = GFP_KERNEL,
1771 };
1772
1773
1774 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

--- 110 unchanged lines hidden ---

1885 /*
1886 * With no special task, compact all zones so that a page of requested
1887 * order is allocatable.
1888 */
1889 int zoneid;
1890 struct zone *zone;
1891 struct compact_control cc = {
1892 .order = pgdat->kcompactd_max_order,
+1893 .total_migrate_scanned = 0,
+1894 .total_free_scanned = 0,
1895 .classzone_idx = pgdat->kcompactd_classzone_idx,
1896 .mode = MIGRATE_SYNC_LIGHT,
1897 .ignore_skip_hint = true,
1898 .gfp_mask = GFP_KERNEL,
1899
1900 };
1901 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
1902 cc.classzone_idx);
-1894 count_vm_event(KCOMPACTD_WAKE);
+1903 count_compact_event(KCOMPACTD_WAKE);
1904
1905 for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
1906 int status;
1907
1908 zone = &pgdat->node_zones[zoneid];
1909 if (!populated_zone(zone))
1910 continue;
1911
1912 if (compaction_deferred(zone, cc.order))
1913 continue;
1914
1915 if (compaction_suitable(zone, cc.order, 0, zoneid) !=
1916 COMPACT_CONTINUE)
1917 continue;
1918
1919 cc.nr_freepages = 0;
1920 cc.nr_migratepages = 0;
+1921 cc.total_migrate_scanned = 0;
+1922 cc.total_free_scanned = 0;
1923 cc.zone = zone;
1924 INIT_LIST_HEAD(&cc.freepages);
1925 INIT_LIST_HEAD(&cc.migratepages);
1926
1927 if (kthread_should_stop())
1928 return;
1929 status = compact_zone(zone, &cc);
1930
1931 if (status == COMPACT_SUCCESS) {
1932 compaction_defer_reset(zone, cc.order, false);
1933 } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
1934 /*
1935 * We use sync migration mode here, so we defer like
1936 * sync direct compaction does.
1937 */
1938 defer_compaction(zone, cc.order);
1939 }
1940
+1941 count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
+1942 cc.total_migrate_scanned);
+1943 count_compact_events(KCOMPACTD_FREE_SCANNED,
+1944 cc.total_free_scanned);
+1945
1946 VM_BUG_ON(!list_empty(&cc.freepages));
1947 VM_BUG_ON(!list_empty(&cc.migratepages));
1948 }
1949
1950 /*
1951 * Regardless of success, we are done until woken up next. But remember
1952 * the requested order/classzone_idx in case it was higher/tighter than
1953 * our current ones

--- 137 unchanged lines hidden ---
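(Aside: the kcompactd hunks above reset the totals before each zone and credit them to the kcompactd-specific events after each compact_zone() call; the change at new line 1903 likewise routes KCOMPACTD_WAKE through the compaction-specific count_compact_event() helper. Below is a stand-alone sketch of that per-zone reset/flush loop, with stub counters and made-up scan amounts purely for illustration.)

#include <stdio.h>

/* Illustrative stand-ins for the kcompactd-specific event counters. */
enum kcompactd_event { KCOMPACTD_MIGRATE_SCANNED, KCOMPACTD_FREE_SCANNED, NR_KCOMPACTD_EVENTS };
static unsigned long kcompactd_events[NR_KCOMPACTD_EVENTS];

static void count_compact_events(enum kcompactd_event ev, unsigned long delta)
{
	kcompactd_events[ev] += delta;
}

/* Trimmed model of the on-stack compact_control reused across zones. */
struct compact_control {
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
};

/* Pretend compaction of one zone: just record some scanner work. */
static void compact_zone(struct compact_control *cc, int zoneid)
{
	cc->total_migrate_scanned += 10 * (zoneid + 1);
	cc->total_free_scanned += 100 * (zoneid + 1);
}

int main(void)
{
	struct compact_control cc = { 0, 0 };
	int zoneid;

	for (zoneid = 0; zoneid < 3; zoneid++) {
		/* Reset the totals before each zone, as the hunk at 1921-1922 does. */
		cc.total_migrate_scanned = 0;
		cc.total_free_scanned = 0;

		compact_zone(&cc, zoneid);

		/* Credit this zone's work to the kcompactd counters (hunk at 1941-1944). */
		count_compact_events(KCOMPACTD_MIGRATE_SCANNED, cc.total_migrate_scanned);
		count_compact_events(KCOMPACTD_FREE_SCANNED, cc.total_free_scanned);
	}

	printf("KCOMPACTD_MIGRATE_SCANNED=%lu KCOMPACTD_FREE_SCANNED=%lu\n",
	       kcompactd_events[KCOMPACTD_MIGRATE_SCANNED],
	       kcompactd_events[KCOMPACTD_FREE_SCANNED]);
	return 0;
}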