--- mbcache.c (55f841ce9395a72c6285fbcc4c403c0c786e1c74)
+++ mbcache.c (1ab6c4997e04a00c50c6d786c2f046adc0d1f5de)
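mbcache's shrinker is converted here from the old single-callback interface, where one .shrink hook both counted and freed cache entries, to the split .count_objects/.scan_objects interface of the reworked shrinker API. The notes between hunks below summarize what each piece of the conversion does.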
 /*
  * linux/fs/mbcache.c
  * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
  */

 /*
  * Filesystem Meta Information Block Cache (mbcache)
  *
[... 72 unchanged lines hidden ...]
  * accessing cache data structures on SMP machines. The lru list is
  * global across all mbcaches.
  */

 static LIST_HEAD(mb_cache_list);
 static LIST_HEAD(mb_cache_lru_list);
 static DEFINE_SPINLOCK(mb_cache_spinlock);

-/*
- * What the mbcache registers as to get shrunk dynamically.
- */
-
-static int mb_cache_shrink_fn(struct shrinker *shrink,
-                              struct shrink_control *sc);
-
-static struct shrinker mb_cache_shrinker = {
-        .shrink = mb_cache_shrink_fn,
-        .seeks = DEFAULT_SEEKS,
-};
-
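The registration block is deleted outright rather than edited: the replacement struct appears at the end of this diff, below the two callbacks it points to, so the forward declaration of the shrink function is no longer needed. For contrast, the retired interface folded counting and scanning into one entry point: a call with sc->nr_to_scan == 0 only asked for the cache size, and any other call freed up to nr_to_scan entries and returned the number still cached. A sketch of that old contract, where count_cached_objects() and scan_and_free() are hypothetical helpers, not functions from this file:

    /* Old-style combined shrinker callback (sketch only; helpers hypothetical). */
    static int old_style_shrink(struct shrinker *shrink, struct shrink_control *sc)
    {
            if (sc->nr_to_scan == 0)
                    return count_cached_objects();  /* query pass: count only */
            scan_and_free(sc->nr_to_scan, sc->gfp_mask);
            return count_cached_objects();          /* entries left afterwards */
    }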
 static inline int
 __mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
 {
         return !list_empty(&ce->e_block_list);
 }


 static void
[... 37 unchanged lines hidden ...]
         return;
 forget:
         spin_unlock(&mb_cache_spinlock);
         __mb_cache_entry_forget(ce, GFP_KERNEL);
 }


 /*
- * mb_cache_shrink_fn()  memory pressure callback
+ * mb_cache_shrink_scan()  memory pressure callback
  *
  * This function is called by the kernel memory management when memory
  * gets low.
  *
  * @shrink: (ignored)
  * @sc: shrink_control passed from reclaim
  *
- * Returns the number of objects which are present in the cache.
+ * Returns the number of objects freed.
  */
-static int
-mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
         LIST_HEAD(free_list);
-        struct mb_cache *cache;
         struct mb_cache_entry *entry, *tmp;
-        int count = 0;
         int nr_to_scan = sc->nr_to_scan;
         gfp_t gfp_mask = sc->gfp_mask;
+        unsigned long freed = 0;

         mb_debug("trying to free %d entries", nr_to_scan);
         spin_lock(&mb_cache_spinlock);
         while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
                 struct mb_cache_entry *ce =
                         list_entry(mb_cache_lru_list.next,
                                    struct mb_cache_entry, e_lru_list);
                 list_move_tail(&ce->e_lru_list, &free_list);
                 __mb_cache_entry_unhash(ce);
+                freed++;
         }
+        spin_unlock(&mb_cache_spinlock);
+        list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
+                __mb_cache_entry_forget(entry, gfp_mask);
+        }
+        return freed;
+}
+
+static unsigned long
+mb_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+        struct mb_cache *cache;
+        unsigned long count = 0;
+
+        spin_lock(&mb_cache_spinlock);
         list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
                 mb_debug("cache %s (%d)", cache->c_name,
                          atomic_read(&cache->c_entry_count));
                 count += atomic_read(&cache->c_entry_count);
         }
         spin_unlock(&mb_cache_spinlock);
-        list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
-                __mb_cache_entry_forget(entry, gfp_mask);
-        }
+
         return vfs_pressure_ratio(count);
 }

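Both new callbacks are now complete, and two details are worth noting. In the scan path, entries are only unhashed and moved onto the local free_list while mb_cache_spinlock is held; __mb_cache_entry_forget() runs after the unlock, so the release work can block under gfp_mask without a spinlock held. In the count path, the raw entry count is not returned directly: vfs_pressure_ratio() scales it by the vfs_cache_pressure sysctl, so mbcache obeys the same tunable as the dentry and inode caches. Quoting that helper from memory (it lives elsewhere in the same patch series, not in this file, so treat the exact form as an assumption):

    static inline unsigned long vfs_pressure_ratio(unsigned long val)
    {
            return mult_frac(val, sysctl_vfs_cache_pressure, 100);
    }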
+static struct shrinker mb_cache_shrinker = {
+        .count_objects = mb_cache_shrink_count,
+        .scan_objects = mb_cache_shrink_scan,
+        .seeks = DEFAULT_SEEKS,
+};

 /*
  * mb_cache_create()  create a new cache
  *
  * All entries in one cache are equal size. Cache entries may be from
  * multiple devices. If this is the first mbcache created, registers
  * the cache with kernel memory management. Returns NULL if no more
  * memory was available.
[... 418 unchanged lines hidden ...]
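Taken together, the new contract is: .count_objects returns a cheap, possibly approximate count of reclaimable objects and must not free anything; .scan_objects frees up to sc->nr_to_scan objects and returns how many it actually freed, or SHRINK_STOP when it cannot make progress. A minimal sketch of a cache using the split API, in the spirit of the code above; every my_-prefixed name is hypothetical:

    #include <linux/atomic.h>
    #include <linux/shrinker.h>

    static atomic_long_t my_nr_cached = ATOMIC_LONG_INIT(0);

    static unsigned long
    my_count_objects(struct shrinker *shrink, struct shrink_control *sc)
    {
            /* Count pass: cheap and lock-free, frees nothing. */
            return atomic_long_read(&my_nr_cached);
    }

    static unsigned long
    my_scan_objects(struct shrinker *shrink, struct shrink_control *sc)
    {
            unsigned long freed = 0;

            /* Stand-in for walking an LRU and releasing real entries. */
            while (freed < sc->nr_to_scan &&
                   atomic_long_read(&my_nr_cached) > 0) {
                    atomic_long_dec(&my_nr_cached);
                    freed++;
            }
            /* Return SHRINK_STOP instead when reclaim cannot progress. */
            return freed;
    }

    static struct shrinker my_shrinker = {
            .count_objects = my_count_objects,
            .scan_objects  = my_scan_objects,
            .seeks         = DEFAULT_SEEKS,
    };

    /* Pair register_shrinker(&my_shrinker) at init with
     * unregister_shrinker(&my_shrinker) at teardown. */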