/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>

#include "nfsd.h"
#include "cache.h"

/* Size of reply cache. Common values are:
 * 4.3BSD:	128
 * 4.4BSD:	256
 * Solaris2:	1024
 * DEC Unix:	512-4096
 */
#define CACHESIZE		1024
#define HASHSIZE		64
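
/*
 * HASHSIZE must stay a power of two: request_hash() selects a bucket
 * by masking with (HASHSIZE-1) rather than taking a modulus.
 */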

static struct hlist_head *	cache_hash;
static struct list_head 	lru_head;
static int			cache_disabled = 1;

/*
 * Calculate the hash index from an XID.
 */
static inline u32 request_hash(u32 xid)
{
	u32 h = xid;
	h ^= (xid >> 24);
	return h & (HASHSIZE-1);
}
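
/*
 * Worked example: for xid 0xdeadbeef, (xid >> 24) is 0xde, so the low
 * byte becomes 0xef ^ 0xde = 0x31, and 0x31 & (HASHSIZE-1) selects
 * bucket 49. Folding the top byte in keeps XIDs that differ mostly in
 * their high bits from piling into the same bucket.
 */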

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);

/*
 * Locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG.
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DEFINE_SPINLOCK(cache_lock);

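/*
 * Lifecycle of an entry, as driven by the code below:
 * RC_UNUSED (free) -> RC_INPROG (request being executed) ->
 * RC_DONE (reply cached) -> eventually recycled via the LRU list.
 */
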
int nfsd_reply_cache_init(void)
{
	struct svc_cacherep	*rp;
	int			i;

	INIT_LIST_HEAD(&lru_head);
	i = CACHESIZE;
	while (i) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp)
			goto out_nomem;
		list_add(&rp->c_lru, &lru_head);
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_HLIST_NODE(&rp->c_hash);
		i--;
	}

	cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	cache_disabled = 0;
	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

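/*
 * Note that the out_nomem path above reuses nfsd_reply_cache_shutdown()
 * to release whatever the partially completed init had allocated.
 */
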
void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)
			kfree(rp->c_replvec.iov_base);
		list_del(&rp->c_lru);
		kfree(rp);
	}

	cache_disabled = 1;

	kfree(cache_hash);
	cache_hash = NULL;
}

/*
 * Move cache entry to end of LRU list
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	list_move_tail(&rp->c_lru, &lru_head);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}

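/*
 * Note: hlist_del_init() above is safe even for an entry that has never
 * been hashed, since nfsd_reply_cache_init() ran INIT_HLIST_NODE() on
 * every entry.
 */
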
/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we grab the oldest unlocked entry off the LRU list.
 * Note that no operation within the loop may sleep.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp, int type)
{
	struct hlist_node	*hn;
	struct hlist_head 	*rh;
	struct svc_cacherep	*rp;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	unsigned long		age;
	int			rtn;

	rqstp->rq_cacherep = NULL;
	if (cache_disabled || type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return RC_DOIT;
	}

	spin_lock(&cache_lock);
	rtn = RC_DOIT;

	rh = &cache_hash[request_hash(xid)];
	hlist_for_each_entry(rp, hn, rh, c_hash) {
		if (rp->c_state != RC_UNUSED &&
		    xid == rp->c_xid && proc == rp->c_proc &&
		    proto == rp->c_prot && vers == rp->c_vers &&
		    time_before(jiffies, rp->c_timestamp + 120*HZ) &&
		    memcmp((char*)&rqstp->rq_addr, (char*)&rp->c_addr, sizeof(rp->c_addr)) == 0) {
			nfsdstats.rchits++;
			goto found_entry;
		}
	}
	nfsdstats.rcmisses++;

	/* This loop shouldn't take more than a few iterations normally */
	{
		int	safe = 0;

		list_for_each_entry(rp, &lru_head, c_lru) {
			if (rp->c_state != RC_INPROG)
				break;
			if (safe++ > CACHESIZE) {
				printk(KERN_WARNING "nfsd: loop in repcache LRU list\n");
				cache_disabled = 1;
				goto out;
			}
		}
	}

	/*
	 * If rp's lru member is now the list head itself, the loop above
	 * walked the entire LRU without finding a reusable entry: every
	 * entry is in-progress. This should not happen.
	 */
	if (&rp->c_lru == &lru_head) {
		static int	complaints;

		printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
		if (++complaints > 5) {
			printk(KERN_WARNING "nfsd: disabling repcache.\n");
			cache_disabled = 1;
		}
		goto out;
	}

	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	memcpy(&rp->c_addr, svc_addr_in(rqstp), sizeof(rp->c_addr));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_timestamp = jiffies;

	hash_refile(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	rp->c_timestamp = jiffies;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		rp->c_state = RC_UNUSED;
	}

	goto out;
}
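
/*
 * A rough sketch of how a dispatcher is expected to drive the entry
 * points in this file (the real caller is nfsd_dispatch(); the lines
 * below are illustrative, not verbatim):
 *
 *	switch (nfsd_cache_lookup(rqstp, proc->pc_cachetype)) {
 *	case RC_DROPIT:
 *		return 0;	-- drop: duplicate already in progress
 *	case RC_REPLY:
 *		return 1;	-- cached reply was appended to rq_res
 *	case RC_DOIT:
 *		break;		-- execute the procedure normally
 *	}
 *	(run the procedure, encoding the reply at statp)
 *	nfsd_cache_update(rqstp, proc->pc_cachetype, statp);
 */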

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
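
/*
 * Of the two cacheable types handled below, RC_REPLSTAT stores only the
 * single status word in c_replstat, while RC_REPLBUFF keeps a private
 * kmalloc'd copy of the whole encoded reply in c_replvec.
 */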
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;

	if (!(rp = rqstp->rq_cacherep) || cache_disabled)
		return;

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;
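	/* len is now the reply length, counted in 32-bit XDR words */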

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		rp->c_state = RC_UNUSED;
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk(KERN_WARNING "nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
		if (!cachv->iov_base) {
			spin_lock(&cache_lock);
			rp->c_state = RC_UNUSED;
			spin_unlock(&cache_lock);
			return;
		}
		cachv->iov_len = len << 2;
		memcpy(cachv->iov_base, statp, len << 2);
		break;
	}
	spin_lock(&cache_lock);
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	rp->c_timestamp = jiffies;
	spin_unlock(&cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}