dir.c: changes between commits 55f841ce9395a72c6285fbcc4c403c0c786e1c74 (before) and 1ab6c4997e04a00c50c6d786c2f046adc0d1f5de (after)
 /*
  * linux/fs/nfs/dir.c
  *
  * Copyright (C) 1992 Rick Sladkey
  *
  * nfs directory handling functions
  *
  * 10 Apr 1996 Added silly rename for unlink --okir

--- 1992 unchanged lines hidden ---

 	while (!list_empty(head)) {
 		cache = list_entry(head->next, struct nfs_access_entry, lru);
 		list_del(&cache->lru);
 		nfs_access_free_entry(cache);
 	}
 }

-int nfs_access_cache_shrinker(struct shrinker *shrink,
-			      struct shrink_control *sc)
+unsigned long
+nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	LIST_HEAD(head);
 	struct nfs_inode *nfsi, *next;
 	struct nfs_access_entry *cache;
 	int nr_to_scan = sc->nr_to_scan;
 	gfp_t gfp_mask = sc->gfp_mask;
+	long freed = 0;

 	if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
-		return (nr_to_scan == 0) ? 0 : -1;
+		return SHRINK_STOP;

 	spin_lock(&nfs_access_lru_lock);
 	list_for_each_entry_safe(nfsi, next, &nfs_access_lru_list, access_cache_inode_lru) {
 		struct inode *inode;

 		if (nr_to_scan-- == 0)
 			break;
 		inode = &nfsi->vfs_inode;
 		spin_lock(&inode->i_lock);
 		if (list_empty(&nfsi->access_cache_entry_lru))
 			goto remove_lru_entry;
 		cache = list_entry(nfsi->access_cache_entry_lru.next,
 				struct nfs_access_entry, lru);
 		list_move(&cache->lru, &head);
 		rb_erase(&cache->rb_node, &nfsi->access_cache);
+		freed++;
 		if (!list_empty(&nfsi->access_cache_entry_lru))
 			list_move_tail(&nfsi->access_cache_inode_lru,
 					&nfs_access_lru_list);
 		else {
 remove_lru_entry:
 			list_del_init(&nfsi->access_cache_inode_lru);
 			smp_mb__before_clear_bit();
 			clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags);
 			smp_mb__after_clear_bit();
 		}
 		spin_unlock(&inode->i_lock);
 	}
 	spin_unlock(&nfs_access_lru_lock);
 	nfs_access_free_list(&head);
+	return freed;
+}
+
+unsigned long
+nfs_access_cache_count(struct shrinker *shrink, struct shrink_control *sc)
+{
 	return vfs_pressure_ratio(atomic_long_read(&nfs_access_nr_entries));
 }

 static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head)
 {
 	struct rb_root *root_node = &nfsi->access_cache;
 	struct rb_node *n;
 	struct nfs_access_entry *entry;

--- 273 unchanged lines hidden ---
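
The change above converts the NFS access cache from a single shrinker callback, which both reported and reclaimed entries, to the split count_objects/scan_objects shrinker API: nfs_access_cache_count reports the current cache size via vfs_pressure_ratio(), while nfs_access_cache_scan walks the LRU, frees up to nr_to_scan entries, and returns how many it actually freed, or SHRINK_STOP when it cannot make progress in the given allocation context. As a rough, illustrative sketch only -- the registration site is not part of this hunk, and the shrinker variable name, init function, and exact register_shrinker() signature (which varies between kernel versions) are assumptions -- the two callbacks would be wired up along these lines:

/*
 * Illustrative sketch (not from this diff): hooking the split
 * callbacks into a struct shrinker. The shrinker variable name
 * and the init function below are assumptions.
 */
static struct shrinker nfs_access_shrinker = {
	.count_objects	= nfs_access_cache_count,	/* how many entries could be freed */
	.scan_objects	= nfs_access_cache_scan,	/* actually free up to nr_to_scan of them */
	.seeks		= DEFAULT_SEEKS,
};

static int __init nfs_access_shrinker_init(void)
{
	return register_shrinker(&nfs_access_shrinker);
}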