vmscan.c (aa3f18b3391ac305baa01faead3fdf9147daf54b) vmscan.c (a92f71263af9d0ab77c260f709c0c079656221aa)
1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed

--- 1582 unchanged lines hidden (view full) ---

1591 * zone_reclaim().
1592 */
1593int zone_reclaim_mode __read_mostly;
1594
1595/*
1596 * Minimum time between zone reclaim scans
1597 */
1598#define ZONE_RECLAIM_INTERVAL (30 * HZ)
1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed

--- 1582 unchanged lines hidden (view full) ---

1591 * zone_reclaim().
1592 */
1593int zone_reclaim_mode __read_mostly;
1594
1595/*
1596 * Minimum time between zone reclaim scans
1597 */
1598#define ZONE_RECLAIM_INTERVAL (30 * HZ)
1599
1599/*
1600/*
1601 * Priority for ZONE_RECLAIM. This determines the fraction of pages
1602 * of a node considered for each zone_reclaim. 4 scans 1/16th of
1603 * a zone.
1604 */
1605#define ZONE_RECLAIM_PRIORITY 4
1606
1607/*
1600 * Try to free up some pages from this zone through reclaim.
1601 */
1602int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1603{
1604 int nr_pages;
1605 struct task_struct *p = current;
1606 struct reclaim_state reclaim_state;
1607 struct scan_control sc;

--- 13 unchanged lines hidden (view full) ---

1621 mask = node_to_cpumask(node_id);
1622 if (!cpus_empty(mask) && node_id != numa_node_id())
1623 return 0;
1624
1625 sc.may_writepage = 0;
1626 sc.may_swap = 0;
1627 sc.nr_scanned = 0;
1628 sc.nr_reclaimed = 0;
1608 * Try to free up some pages from this zone through reclaim.
1609 */
1610int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1611{
1612 int nr_pages;
1613 struct task_struct *p = current;
1614 struct reclaim_state reclaim_state;
1615 struct scan_control sc;

--- 13 unchanged lines hidden (view full) ---

1629 mask = node_to_cpumask(node_id);
1630 if (!cpus_empty(mask) && node_id != numa_node_id())
1631 return 0;
1632
1633 sc.may_writepage = 0;
1634 sc.may_swap = 0;
1635 sc.nr_scanned = 0;
1636 sc.nr_reclaimed = 0;
1629 sc.priority = 0;
1637 sc.priority = ZONE_RECLAIM_PRIORITY + 1;
1630 sc.nr_mapped = read_page_state(nr_mapped);
1631 sc.gfp_mask = gfp_mask;
1632
1633 disable_swap_token();
1634
1635 nr_pages = 1 << order;
1636 if (nr_pages > SWAP_CLUSTER_MAX)
1637 sc.swap_cluster_max = nr_pages;
1638 else
1639 sc.swap_cluster_max = SWAP_CLUSTER_MAX;
1640
1641 cond_resched();
1642 p->flags |= PF_MEMALLOC;
1643 reclaim_state.reclaimed_slab = 0;
1644 p->reclaim_state = &reclaim_state;
1645
1638 sc.nr_mapped = read_page_state(nr_mapped);
1639 sc.gfp_mask = gfp_mask;
1640
1641 disable_swap_token();
1642
1643 nr_pages = 1 << order;
1644 if (nr_pages > SWAP_CLUSTER_MAX)
1645 sc.swap_cluster_max = nr_pages;
1646 else
1647 sc.swap_cluster_max = SWAP_CLUSTER_MAX;
1648
1649 cond_resched();
1650 p->flags |= PF_MEMALLOC;
1651 reclaim_state.reclaimed_slab = 0;
1652 p->reclaim_state = &reclaim_state;
1653
1646 shrink_zone(zone, &sc);
1654 /*
1655 * Free memory by calling shrink zone with increasing priorities
1656 * until we have enough memory freed.
1657 */
1658 do {
1659 sc.priority--;
1660 shrink_zone(zone, &sc);
1647
1661
1662 } while (sc.nr_reclaimed < nr_pages && sc.priority > 0);
1663
1648 p->reclaim_state = NULL;
1649 current->flags &= ~PF_MEMALLOC;
1650
1651 if (sc.nr_reclaimed == 0)
1652 zone->last_unsuccessful_zone_reclaim = jiffies;
1653
1654 return sc.nr_reclaimed >= nr_pages;
1655}
1656#endif
1657
1664 p->reclaim_state = NULL;
1665 current->flags &= ~PF_MEMALLOC;
1666
1667 if (sc.nr_reclaimed == 0)
1668 zone->last_unsuccessful_zone_reclaim = jiffies;
1669
1670 return sc.nr_reclaimed >= nr_pages;
1671}
1672#endif
1673