xref: /openbmc/linux/fs/nfsd/nfscache.c (revision 840ef8b7cc584a23c4f9d05352f4dbaf8e56e5ab)
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

#define HASHSIZE		64

static struct hlist_head *	cache_hash;
static struct list_head 	lru_head;
static struct kmem_cache	*drc_slab;
static unsigned int		num_drc_entries;
static unsigned int		max_drc_entries;

/*
 * Calculate the hash index from an XID.
 */
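/*
 * XIDs from a client are typically assigned sequentially, so fold the high
 * byte into the low bits before masking down to one of the HASHSIZE (64)
 * buckets. For example, xid 0x11223344 hashes as
 * 0x11223344 ^ 0x11 == 0x11223355, and 0x11223355 & 63 == 21.
 */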
static inline u32 request_hash(u32 xid)
{
	u32 h = xid;
	h ^= (xid >> 24);
	return h & (HASHSIZE-1);
}

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void	cache_cleaner_func(struct work_struct *unused);
static int 	nfsd_reply_cache_shrink(struct shrinker *shrink,
					struct shrink_control *sc);

struct shrinker nfsd_reply_cache_shrinker = {
	.shrink	= nfsd_reply_cache_shrink,
	.seeks	= 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, the lock must be held when accessing _prev or _next.
 */
static DEFINE_SPINLOCK(cache_lock);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the table above also gives a rough upper bound on the memory
 * used, in kilobytes.
 */
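/*
 * For example, assuming 4k pages (PAGE_SHIFT == 12), a machine with 1GB of
 * low memory has 262144 low pages; int_sqrt(262144) == 512, so the limit
 * computed below is (16 * 512) << 2 == 32768 entries, matching the 1GB row
 * in the table above.
 */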
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}

static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
		INIT_HLIST_NODE(&rp->c_hash);
	}
	return rp;
}

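/*
 * Unhash the entry, unlink it from the LRU and free it. Apart from the
 * shutdown path, callers are expected to hold cache_lock;
 * nfsd_reply_cache_free() below is the locking wrapper.
 */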
static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF)
		kfree(rp->c_replvec.iov_base);
	hlist_del(&rp->c_hash);
	list_del(&rp->c_lru);
	--num_drc_entries;
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
	spin_lock(&cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&cache_lock);
}

int nfsd_reply_cache_init(void)
{
	register_shrinker(&nfsd_reply_cache_shrinker);
	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	INIT_LIST_HEAD(&lru_head);
	max_drc_entries = nfsd_cache_size_limit();
	num_drc_entries = 0;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	unregister_shrinker(&nfsd_reply_cache_shrinker);
	cancel_delayed_work_sync(&cache_cleaner);

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		nfsd_reply_cache_free_locked(rp);
	}

	kfree(cache_hash);
	cache_hash = NULL;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &lru_head);
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}

static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
	return rp->c_state != RC_INPROG &&
	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static void
prune_cache_entries(void)
{
	struct svc_cacherep *rp, *tmp;

	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
		if (!nfsd_cache_entry_expired(rp) &&
		    num_drc_entries <= max_drc_entries)
			break;
		nfsd_reply_cache_free_locked(rp);
	}

	/*
	 * Conditionally rearm the job. If we cleaned out the list, then
	 * cancel any pending run (since there won't be any work to do).
	 * Otherwise, we rearm the job or modify the existing one to run in
	 * RC_EXPIRE since we just ran the pruner.
	 */
	if (list_empty(&lru_head))
		cancel_delayed_work(&cache_cleaner);
	else
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
}

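/* Workqueue callback: take cache_lock and run the pruner. */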
static void
cache_cleaner_func(struct work_struct *unused)
{
	spin_lock(&cache_lock);
	prune_cache_entries();
	spin_unlock(&cache_lock);
}

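/*
 * Shrinker callback, invoked by the VM under memory pressure. When
 * sc->nr_to_scan is non-zero we prune the cache; either way we return the
 * current number of entries so the VM can size future scan requests.
 */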
static int
nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned int num;

	spin_lock(&cache_lock);
	if (sc->nr_to_scan)
		prune_cache_entries();
	num = num_drc_entries;
	spin_unlock(&cache_lock);

	return num;
}

/*
 * Walk an xdr_buf and compute a checksum over at most the first
 * RC_CSUMLEN bytes
 */
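/*
 * The checksum is stored in each entry and compared on lookup, so a
 * retransmission can be told apart from a different request that happens
 * to reuse the same XID.
 */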
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
	struct svc_cacherep	*rp;
	struct hlist_head 	*rh;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;

	rh = &cache_hash[request_hash(xid)];
	hlist_for_each_entry(rp, rh, c_hash) {
		if (xid == rp->c_xid && proc == rp->c_proc &&
		    proto == rp->c_prot && vers == rp->c_vers &&
		    rqstp->rq_arg.len == rp->c_len && csum == rp->c_csum &&
		    rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
		    rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr))
			return rp;
	}
	return NULL;
}

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 */
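/*
 * The return value tells the caller (nfsd_dispatch) what to do: RC_DOIT
 * means process the call normally, RC_DROPIT means drop it (a copy is
 * already in progress, or this looks like an over-eager retransmission),
 * and RC_REPLY means a cached reply has already been composed into rq_res
 * and can be sent as-is.
 */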
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	__wsum			csum;
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return RC_DOIT;
	}

	csum = nfsd_cache_csum(rqstp);

	spin_lock(&cache_lock);
	rtn = RC_DOIT;

	rp = nfsd_cache_search(rqstp, csum);
	if (rp)
		goto found_entry;

	/* Try to use the first entry on the LRU */
	if (!list_empty(&lru_head)) {
		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
		if (nfsd_cache_entry_expired(rp) ||
		    num_drc_entries >= max_drc_entries) {
			lru_put_end(rp);
			prune_cache_entries();
			goto setup_entry;
		}
	}

	/* Drop the lock and allocate a new entry */
	spin_unlock(&cache_lock);
	rp = nfsd_reply_cache_alloc();
	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		return RC_DOIT;
	}
	spin_lock(&cache_lock);
	++num_drc_entries;

	/*
	 * Must search again just in case someone inserted one
	 * after we dropped the lock above.
	 */
	found = nfsd_cache_search(rqstp, csum);
	if (found) {
		nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	/*
	 * We're keeping the one we just allocated. If that pushes us over
	 * the limit, prune one off the head of the LRU in trade for the
	 * entry we just allocated.
	 */
	if (num_drc_entries >= max_drc_entries)
		nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
						struct svc_cacherep, c_lru));

setup_entry:
	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	hash_refile(rp);
	lru_put_end(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;

	if (!rp)
		return;

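	/*
	 * len is the number of 32-bit XDR words from the status word at
	 * *statp to the end of the head kvec, i.e. the portion of the
	 * reply that gets cached.
	 */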
	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(rp);
			return;
		}
		cachv->iov_len = len << 2;
		memcpy(cachv->iov_base, statp, len << 2);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(rp);
		return;
	}
	spin_lock(&cache_lock);
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}