nfscache.c: 27eb2c4b3d3e13f376a359e293c212a2e9407af5 → 1ab6c4997e04a00c50c6d786c2f046adc0d1f5de
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>

--- 45 unchanged lines hidden (view full) ---

/* longest hash chain seen */
static unsigned int longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int longest_chain_cachesize;

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void cache_cleaner_func(struct work_struct *unused);
-static int nfsd_reply_cache_shrink(struct shrinker *shrink,
-				struct shrink_control *sc);
+static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
+				struct shrink_control *sc);
+static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
+				struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
-	.shrink = nfsd_reply_cache_shrink,
+	.scan_objects = nfsd_reply_cache_scan,
+	.count_objects = nfsd_reply_cache_count,
	.seeks	= 1,
};

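The hunk above is the heart of the conversion: the single .shrink callback is replaced by the split .count_objects/.scan_objects pair of the newer shrinker API. The registration of this shrinker is not visible in this excerpt (it sits in the hidden lines); as a minimal sketch, assuming the usual register_shrinker()/unregister_shrinker() helpers and with purely illustrative init/shutdown function names, wiring it up looks like:

/*
 * Sketch only: how a shrinker such as nfsd_reply_cache_shrinker is
 * typically registered and torn down. The function names below are
 * placeholders for this illustration, not the file's real entry points.
 */
#include <linux/shrinker.h>

static int example_reply_cache_init(void)
{
	/* make the cache visible to memory reclaim */
	register_shrinker(&nfsd_reply_cache_shrinker);
	return 0;
}

static void example_reply_cache_shutdown(void)
{
	/* stop reclaim callbacks before the cache is torn down */
	unregister_shrinker(&nfsd_reply_cache_shrinker);
}
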
/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, it when accessing _prev or _next, the lock must be held.
 */

--- 152 unchanged lines hidden (view full) ---

	return rp->c_state != RC_INPROG &&
	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
-static void
+static long
prune_cache_entries(void)
{
	struct svc_cacherep *rp, *tmp;
+	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
		if (!nfsd_cache_entry_expired(rp) &&
		    num_drc_entries <= max_drc_entries)
			break;
		nfsd_reply_cache_free_locked(rp);
+		freed++;
	}

	/*
	 * Conditionally rearm the job. If we cleaned out the list, then
	 * cancel any pending run (since there won't be any work to do).
	 * Otherwise, we rearm the job or modify the existing one to run in
	 * RC_EXPIRE since we just ran the pruner.
	 */
	if (list_empty(&lru_head))
		cancel_delayed_work(&cache_cleaner);
	else
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
+	return freed;
}

static void
cache_cleaner_func(struct work_struct *unused)
{
	spin_lock(&cache_lock);
	prune_cache_entries();
	spin_unlock(&cache_lock);
}

-static int
-nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
-	unsigned int num;
+	unsigned long num;

	spin_lock(&cache_lock);
-	if (sc->nr_to_scan)
-		prune_cache_entries();
	num = num_drc_entries;
	spin_unlock(&cache_lock);

	return num;
}

+static unsigned long
+nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long freed;
+
+	spin_lock(&cache_lock);
+	freed = prune_cache_entries();
+	spin_unlock(&cache_lock);
+	return freed;
+}

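With prune_cache_entries() now reporting how many entries it freed, the old combined callback (which treated sc->nr_to_scan == 0 as a "just count" request) splits cleanly into a cheap counter and a worker. The sketch below is illustrative only: it shows the calling contract the two new callbacks are written against, with a made-up driver function standing in for the real reclaim code in mm/vmscan.c. Note that nfsd_reply_cache_scan() prunes by age and cache size rather than honoring nr_to_scan exactly.

/*
 * Illustrative sketch, not kernel code: the contract assumed by the
 * count/scan split. The driver function and its name are inventions of
 * this sketch; the real caller lives in mm/vmscan.c.
 */
#include <linux/shrinker.h>

static unsigned long example_drive_shrinker(struct shrinker *s,
					    struct shrink_control *sc)
{
	unsigned long freeable, freed;

	/* count_objects() must be cheap and must not free anything */
	freeable = s->count_objects(s, sc);
	if (freeable == 0)
		return 0;		/* nothing to reclaim, skip this shrinker */

	/* ask scan_objects() to try to free some of them */
	sc->nr_to_scan = freeable;
	freed = s->scan_objects(s, sc);
	if (freed == SHRINK_STOP)	/* shrinker could not make progress */
		return 0;
	return freed;
}
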
/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;

--- 332 unchanged lines hidden ---
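The body of nfsd_cache_csum() sits in the hidden lines above, so only its comment and first locals are visible here. As a rough sketch of the technique the comment describes (checksumming at most the first RC_CSUMLEN bytes of an xdr_buf: head kvec first, then the page array), assuming the standard csum_partial() helper and with all names local to the sketch:

/*
 * Sketch only - not the hidden nfsd_cache_csum() body. It walks an
 * xdr_buf the way the comment above describes: fold the head kvec into
 * the checksum first, then continue through the page array until the
 * byte budget is used up.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sunrpc/xdr.h>
#include <net/checksum.h>

static __wsum example_xdr_csum(struct xdr_buf *buf, unsigned int budget)
{
	const unsigned char *p = buf->head[0].iov_base;
	unsigned int remaining = min_t(unsigned int,
				       buf->head[0].iov_len + buf->page_len,
				       budget);
	unsigned int len = min_t(unsigned int, buf->head[0].iov_len, remaining);
	unsigned int idx, base;
	__wsum csum;

	/* head kvec first */
	csum = csum_partial(p, len, 0);
	remaining -= len;

	/* then page by page through the page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (remaining) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(unsigned int, PAGE_SIZE - base, remaining);
		csum = csum_partial(p, len, csum);
		remaining -= len;
		base = 0;
		idx++;
	}
	return csum;
}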