vmscan.c: e815af95f94914993bbad279c71cf5fef9f4eaac -> d773ed6b856a96bd6d18b6e04455e3ced0876da4

 /*
  * linux/mm/vmscan.c
  *
  * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  *
  * Swap reorganised 29.12.95, Stephen Tweedie.
  * kswapd added: 7.1.96  sct
  * Removed kswapd_ctl limits, and swap out as many pages as needed

--- 1094 unchanged lines hidden ---

 static unsigned long shrink_zone(int priority, struct zone *zone,
 				 struct scan_control *sc)
 {
 	unsigned long nr_active;
 	unsigned long nr_inactive;
 	unsigned long nr_to_scan;
 	unsigned long nr_reclaimed = 0;
 
-	zone_set_flag(zone, ZONE_RECLAIM_LOCKED);
-
 	/*
 	 * Add one to `nr_to_scan' just to make sure that the kernel will
 	 * slowly sift through the active list.
 	 */
 	zone->nr_scan_active +=
 		(zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
 	nr_active = zone->nr_scan_active;
 	if (nr_active >= sc->swap_cluster_max)

--- 22 unchanged lines hidden ---

 					(unsigned long)sc->swap_cluster_max);
 			nr_inactive -= nr_to_scan;
 			nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
 								sc);
 		}
 	}
 
 	throttle_vm_writeout(sc->gfp_mask);
-
-	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
 	return nr_reclaimed;
 }
 
 /*
  * This is the direct reclaim path, for page-allocating processes.  We only
  * try to reclaim pages from zones which will satisfy the caller's allocation
  * request.
  *

--- 734 unchanged lines hidden ---

 	p->reclaim_state = NULL;
 	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
 	return nr_reclaimed >= nr_pages;
 }
 
 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 {
 	int node_id;
+	int ret;
 
 	/*
 	 * Zone reclaim reclaims unmapped file backed pages and
 	 * slab pages if we are over the defined limits.
 	 *
 	 * A small portion of unmapped file backed pages is needed for
 	 * file I/O otherwise pages read by file I/O will be immediately
 	 * thrown out if the zone is overallocated. So we do not reclaim
 	 * if less than a specified percentage of the zone is used by
 	 * unmapped file backed pages.
 	 */
 	if (zone_page_state(zone, NR_FILE_PAGES) -
 	    zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
 	    && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
 			<= zone->min_slab_pages)
 		return 0;
 
+	if (zone_is_all_unreclaimable(zone))
+		return 0;
+
 	/*
-	 * Avoid concurrent zone reclaims, do not reclaim in a zone that does
-	 * not have reclaimable pages and if we should not delay the allocation
-	 * then do not scan.
+	 * Do not scan if the allocation should not be delayed.
 	 */
-	if (!(gfp_mask & __GFP_WAIT) || zone_is_all_unreclaimable(zone) ||
-		zone_is_reclaim_locked(zone) || (current->flags & PF_MEMALLOC))
+	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
 			return 0;
 
 	/*
 	 * Only run zone reclaim on the local zone or on zones that do not
 	 * have associated processors. This will favor the local processor
 	 * over remote processors and spread off node memory allocations
 	 * as wide as possible.
 	 */
 	node_id = zone_to_nid(zone);
 	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
 		return 0;
-	return __zone_reclaim(zone, gfp_mask, order);
+
+	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
+		return 0;
+	ret = __zone_reclaim(zone, gfp_mask, order);
+	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
+
+	return ret;
 }
 #endif
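
Note on the scan-rate arithmetic this diff leaves untouched: shrink_zone() queues (NR_ACTIVE >> priority) + 1 active pages per call, so each drop in priority doubles the slice of the list that gets scanned, and the "+ 1" keeps even a tiny list moving. A stand-alone sketch of that arithmetic (the list size is invented; DEF_PRIORITY of 12 matches kernels of this era):

	#include <stdio.h>

	#define DEF_PRIORITY 12		/* reclaim starts here, works down to 0 */

	int main(void)
	{
		unsigned long nr_active = 100000;	/* invented active-list size */
		int priority;

		/*
		 * Same arithmetic as shrink_zone(): each lower priority doubles
		 * the slice of the active list queued for scanning, and "+ 1"
		 * guarantees forward progress when nr_active >> priority is 0.
		 */
		for (priority = DEF_PRIORITY; priority >= 0; priority--)
			printf("priority %2d: queue %6lu pages\n",
			       priority, (nr_active >> priority) + 1);
		return 0;
	}

At priority 12 only about a 1/4096 share of the list is queued; by priority 0 the whole list is.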
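
The first bail-out in zone_reclaim() is also unchanged: reclaim is skipped while the zone still has few unmapped file-backed pages and little reclaimable slab, since those pages are exactly what file I/O needs to stay cached. A minimal user-space sketch of that test, with every value invented for illustration (in the kernel they come from zone_page_state() and the zone minimums derived from the vm.min_unmapped_ratio and vm.min_slab_ratio sysctls):

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long file_pages = 5000;	/* NR_FILE_PAGES */
		unsigned long file_mapped = 4800;	/* NR_FILE_MAPPED */
		unsigned long slab_reclaimable = 900;	/* NR_SLAB_RECLAIMABLE */
		unsigned long min_unmapped_pages = 250;
		unsigned long min_slab_pages = 1000;

		/* 200 unmapped file pages <= 250 and 900 <= 1000 -> skip */
		bool skip = (file_pages - file_mapped <= min_unmapped_pages &&
			     slab_reclaimable <= min_slab_pages);

		printf("zone reclaim %s\n", skip ? "skipped" : "attempted");
		return 0;
	}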
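
The substance of this diff is where ZONE_RECLAIM_LOCKED is taken. Before, zone_reclaim() only read the flag (zone_is_reclaim_locked()) and shrink_zone() set it later, so two tasks could both pass the check and reclaim the same zone concurrently. After, zone_reclaim() claims the flag with a single atomic read-modify-write (zone_test_and_set_flag()) before calling __zone_reclaim(). A minimal user-space model of the difference using C11 atomic_flag (the function names and the fake reclaim return value are illustrative, not kernel API):

	#include <stdatomic.h>
	#include <stdio.h>

	/* Illustrative stand-in for the per-zone ZONE_RECLAIM_LOCKED bit. */
	static atomic_flag reclaim_locked = ATOMIC_FLAG_INIT;

	/* Pretend reclaim work; the page count is made up. */
	static int fake_reclaim(void)
	{
		return 32;
	}

	/*
	 * Mirrors the new zone_reclaim() tail: one atomic test-and-set
	 * claims the zone, and the flag is cleared once reclaim finishes.
	 */
	static int zone_reclaim_sketch(void)
	{
		int ret;

		if (atomic_flag_test_and_set(&reclaim_locked))
			return 0;	/* another task is already reclaiming */
		ret = fake_reclaim();
		atomic_flag_clear(&reclaim_locked);
		return ret;
	}

	int main(void)
	{
		printf("reclaimed %d pages\n", zone_reclaim_sketch());
		return 0;
	}

atomic_flag_test_and_set() returns the previous value, so exactly one caller observes "clear" and proceeds; the old read-then-set pair left a window between the read and the write in which a second task could enter reclaim.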