xref: /openbmc/linux/fs/nfs/fscache.c (revision 12eb4683)
/* NFS filesystem cache interface
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/in6.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"
#include "iostat.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_FSCACHE

static struct rb_root nfs_fscache_keys = RB_ROOT;
static DEFINE_SPINLOCK(nfs_fscache_keys_lock);

/*
 * Get the per-client index cookie for an NFS client if the appropriate mount
 * flag was set
 * - We always try to get an index cookie for the client, but get filehandle
 *   cookies on a per-superblock basis, depending on the mount flags
 */
void nfs_fscache_get_client_cookie(struct nfs_client *clp)
{
	/* create a per-server cache index; the per-superblock indexes are
	 * created under it */
	clp->fscache = fscache_acquire_cookie(nfs_fscache_netfs.primary_index,
					      &nfs_fscache_server_index_def,
					      clp, true);
	dfprintk(FSCACHE, "NFS: get client cookie (0x%p/0x%p)\n",
		 clp, clp->fscache);
}

/*
 * Dispose of a per-client cookie
 */
void nfs_fscache_release_client_cookie(struct nfs_client *clp)
{
	dfprintk(FSCACHE, "NFS: releasing client cookie (0x%p/0x%p)\n",
		 clp, clp->fscache);

	fscache_relinquish_cookie(clp->fscache, 0);
	clp->fscache = NULL;
}

/*
 * Get the cache cookie for an NFS superblock.  We have to handle
 * uniquification here because the cache doesn't do it for us.
 *
 * The default uniquifier is just an empty string, but it may be overridden
 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
 * superblock across an automount point of some nature.
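 *
 * For example (illustrative only; the server, export path and uniquifier
 * below are placeholders), a mount such as:
 *
 *	mount -t nfs -o fsc=foo server:/export /mnt
 *
 * would give this superblock the uniquifier "foo".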
 */
void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
	struct nfs_fscache_key *key, *xkey;
	struct nfs_server *nfss = NFS_SB(sb);
	struct rb_node **p, *parent;
	int diff;

	if (!uniq) {
		uniq = "";
		ulen = 1;
	}

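	/* Build a candidate index key from the parameters that distinguish
	 * this superblock: the nfs_client it belongs to, the mount and server
	 * options, and the uniquifier string. */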
	key = kzalloc(sizeof(*key) + ulen, GFP_KERNEL);
	if (!key)
		return;

	key->nfs_client = nfss->nfs_client;
	key->key.super.s_flags = sb->s_flags & NFS_MS_MASK;
	key->key.nfs_server.flags = nfss->flags;
	key->key.nfs_server.rsize = nfss->rsize;
	key->key.nfs_server.wsize = nfss->wsize;
	key->key.nfs_server.acregmin = nfss->acregmin;
	key->key.nfs_server.acregmax = nfss->acregmax;
	key->key.nfs_server.acdirmin = nfss->acdirmin;
	key->key.nfs_server.acdirmax = nfss->acdirmax;
	key->key.nfs_server.fsid = nfss->fsid;
	key->key.rpc_auth.au_flavor = nfss->client->cl_auth->au_flavor;

	key->key.uniq_len = ulen;
	memcpy(key->key.uniquifier, uniq, ulen);

	spin_lock(&nfs_fscache_keys_lock);
	p = &nfs_fscache_keys.rb_node;
	parent = NULL;
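	/* Walk the tree of keys registered by other NFS superblocks.  Keys
	 * are ordered by nfs_client pointer first, then by the fixed-size
	 * parameter block, and finally by the uniquifier string. */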
	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct nfs_fscache_key, node);

		if (key->nfs_client < xkey->nfs_client)
			goto go_left;
		if (key->nfs_client > xkey->nfs_client)
			goto go_right;

		diff = memcmp(&key->key, &xkey->key, sizeof(key->key));
		if (diff < 0)
			goto go_left;
		if (diff > 0)
			goto go_right;

		if (key->key.uniq_len == 0)
			goto non_unique;
		diff = memcmp(key->key.uniquifier,
			      xkey->key.uniquifier,
			      key->key.uniq_len);
		if (diff < 0)
			goto go_left;
		if (diff > 0)
			goto go_right;
		goto non_unique;

	go_left:
		p = &(*p)->rb_left;
		continue;
	go_right:
		p = &(*p)->rb_right;
	}

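	/* No identical key exists: link the new key into the tree at the
	 * position found above and rebalance. */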
	rb_link_node(&key->node, parent, p);
	rb_insert_color(&key->node, &nfs_fscache_keys);
	spin_unlock(&nfs_fscache_keys_lock);
	nfss->fscache_key = key;

	/* create a cache index for looking up filehandles */
	nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
					       &nfs_fscache_super_index_def,
					       nfss, true);
	dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
		 nfss, nfss->fscache);
	return;

non_unique:
	spin_unlock(&nfs_fscache_keys_lock);
	kfree(key);
	nfss->fscache_key = NULL;
	nfss->fscache = NULL;
	printk(KERN_WARNING "NFS:"
	       " Cache request denied due to non-unique superblock keys\n");
}

/*
 * Release a per-superblock cookie
 */
void nfs_fscache_release_super_cookie(struct super_block *sb)
{
	struct nfs_server *nfss = NFS_SB(sb);

	dfprintk(FSCACHE, "NFS: releasing superblock cookie (0x%p/0x%p)\n",
		 nfss, nfss->fscache);

	fscache_relinquish_cookie(nfss->fscache, 0);
	nfss->fscache = NULL;

	if (nfss->fscache_key) {
		spin_lock(&nfs_fscache_keys_lock);
		rb_erase(&nfss->fscache_key->node, &nfs_fscache_keys);
		spin_unlock(&nfs_fscache_keys_lock);
		kfree(nfss->fscache_key);
		nfss->fscache_key = NULL;
	}
}

/*
 * Initialise the per-inode cache cookie pointer for an NFS inode.
 */
void nfs_fscache_init_inode(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	nfsi->fscache = NULL;
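	/* Only regular files are given a cookie.  The cookie is acquired in
	 * the disabled state and is enabled later, at open time, by
	 * nfs_fscache_open_file(). */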
	if (!S_ISREG(inode->i_mode))
		return;
	nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
					       &nfs_fscache_inode_object_def,
					       nfsi, false);
}

/*
 * Release a per-inode cookie.
 */
void nfs_fscache_clear_inode(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);

	fscache_relinquish_cookie(cookie, false);
	nfsi->fscache = NULL;
}

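/*
 * Decide whether the cache may be enabled for an inode.  This is used as the
 * can_enable check by fscache_enable_cookie() below: caching is only enabled
 * if nobody currently has the inode open for writing.
 */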
static bool nfs_fscache_can_enable(void *data)
{
	struct inode *inode = data;

	return !inode_is_open_for_write(inode);
}

/*
 * Enable or disable caching, as appropriate, for a file that is being opened.
 * The cookie is allocated when the inode is initialised, but is not enabled at
 * that time.  Enablement is deferred to file-open time to avoid stat() and
 * access() thrashing the cache.
 *
 * For now, with NFS, only regular files that are open read-only will be able
 * to use the cache.
 *
 * We enable the cache for an inode if we open it read-only and it isn't
 * currently open for writing.  We disable the cache if the inode is open
 * write-only.
 *
 * The caller uses the file struct to pin i_writecount on the inode before
 * calling us when a file is opened for writing, so we can make use of that.
 *
 * Note that this may be invoked multiple times in parallel by parallel
 * nfs_open() functions.
 */
void nfs_fscache_open_file(struct inode *inode, struct file *filp)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	if (!fscache_cookie_valid(cookie))
		return;

	if (inode_is_open_for_write(inode)) {
		dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
		clear_bit(NFS_INO_FSCACHE, &nfsi->flags);
		fscache_disable_cookie(cookie, true);
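		/* Discard any pages this inode already has in the cache so
		 * that stale copies aren't left behind once writes start
		 * bypassing the cache. */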
		fscache_uncache_all_inode_pages(cookie, inode);
	} else {
		dfprintk(FSCACHE, "NFS: nfsi 0x%p enabling cache\n", nfsi);
		fscache_enable_cookie(cookie, nfs_fscache_can_enable, inode);
		if (fscache_cookie_enabled(cookie))
			set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
	}
}
EXPORT_SYMBOL_GPL(nfs_fscache_open_file);

/*
 * Release the caching state associated with a page, if the page isn't busy
 * interacting with the cache.
 * - Returns true (can release page) or false (page busy).
 */
int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
	if (PageFsCache(page)) {
		struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host);

		BUG_ON(!cookie);
		dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
			 cookie, page, NFS_I(page->mapping->host));

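		/* Ask fscache to end its interest in the page; if it cannot
		 * do so immediately (e.g. the page is still being written to
		 * the cache) and gfp does not permit waiting, report the page
		 * as busy. */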
		if (!fscache_maybe_release_page(cookie, page, gfp))
			return 0;

		nfs_add_fscache_stats(page->mapping->host,
				      NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
	}

	return 1;
}

/*
 * Release the caching state associated with a page if it is undergoing
 * complete page invalidation.
 */
void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode)
{
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	BUG_ON(!cookie);

	dfprintk(FSCACHE, "NFS: fscache invalidatepage (0x%p/0x%p/0x%p)\n",
		 cookie, page, NFS_I(inode));

	fscache_wait_on_page_write(cookie, page);

	BUG_ON(!PageLocked(page));
	fscache_uncache_page(cookie, page);
	nfs_add_fscache_stats(page->mapping->host,
			      NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
}

/*
 * Handle completion of a page being read from the cache.
 * - Called in process (keventd) context.
 */
static void nfs_readpage_from_fscache_complete(struct page *page,
					       void *context,
					       int error)
{
	dfprintk(FSCACHE,
		 "NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
		 page, context, error);

	/* if the read completed successfully, mark the page up to date and
	 * unlock it; if it failed, fall back to reading from the server, and
	 * only unlock the page here if that fallback could not be started */
	if (!error) {
		SetPageUptodate(page);
		unlock_page(page);
	} else {
		error = nfs_readpage_async(context, page->mapping->host, page);
		if (error)
			unlock_page(page);
	}
}

/*
 * Retrieve a page from fscache
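 * - Returns 0 if a read was dispatched from the cache, 1 if the page was not
 *   available from the cache and should be read from the server, or a
 *   negative error code.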
 */
int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
				struct inode *inode, struct page *page)
{
	int ret;

	dfprintk(FSCACHE,
		 "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
		 nfs_i_fscache(inode), page, page->index, page->flags, inode);

	ret = fscache_read_or_alloc_page(nfs_i_fscache(inode),
					 page,
					 nfs_readpage_from_fscache_complete,
					 ctx,
					 GFP_KERNEL);

	switch (ret) {
	case 0: /* read BIO submitted (page in fscache) */
		dfprintk(FSCACHE,
			 "NFS:    readpage_from_fscache: BIO submitted\n");
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK, 1);
		return ret;

	case -ENOBUFS: /* inode not in cache */
	case -ENODATA: /* page not in cache */
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL, 1);
		dfprintk(FSCACHE,
			 "NFS:    readpage_from_fscache %d\n", ret);
		return 1;

	default:
		dfprintk(FSCACHE, "NFS:    readpage_from_fscache %d\n", ret);
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL, 1);
	}
	return ret;
}

/*
 * Retrieve a set of pages from fscache
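 * - Pages for which reads are dispatched from the cache are removed from the
 *   *pages list and *nr_pages is reduced accordingly; any pages left over
 *   must be read from the server.
 * - Returns 0 if all the pages were dispatched from the cache, 1 if some or
 *   all of them must be read from the server, or a negative error code.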
 */
int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
				 struct inode *inode,
				 struct address_space *mapping,
				 struct list_head *pages,
				 unsigned *nr_pages)
{
	unsigned npages = *nr_pages;
	int ret;

	dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
		 nfs_i_fscache(inode), npages, inode);

	ret = fscache_read_or_alloc_pages(nfs_i_fscache(inode),
					  mapping, pages, nr_pages,
					  nfs_readpage_from_fscache_complete,
					  ctx,
					  mapping_gfp_mask(mapping));
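	/* Update the hit/miss statistics: if any pages were dispatched from
	 * the cache the whole request is counted as read hits, and any pages
	 * left over for the server are counted as misses. */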
	if (*nr_pages < npages)
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK,
				      npages);
	if (*nr_pages > 0)
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL,
				      *nr_pages);

	switch (ret) {
	case 0: /* read submitted to the cache for all pages */
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: submitted\n");

		return ret;

	case -ENOBUFS: /* some pages aren't cached and can't be */
	case -ENODATA: /* some pages aren't cached */
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: no page: %d\n", ret);
		return 1;

	default:
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: ret  %d\n", ret);
	}

	return ret;
}

/*
 * Store a newly fetched page in fscache
 * - PG_fscache must be set on the page
 */
void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
{
	int ret;

	dfprintk(FSCACHE,
		 "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx)/%d)\n",
		 nfs_i_fscache(inode), page, page->index, page->flags, sync);

	ret = fscache_write_page(nfs_i_fscache(inode), page, GFP_KERNEL);
	dfprintk(FSCACHE,
		 "NFS:     readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n",
		 page, page->index, page->flags, ret);

	if (ret != 0) {
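		/* The page could not be queued for writing to the cache, so
		 * remove it from the cache's knowledge rather than leave it
		 * marked as cached. */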
		fscache_uncache_page(nfs_i_fscache(inode), page);
		nfs_add_fscache_stats(inode,
				      NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL, 1);
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
	} else {
		nfs_add_fscache_stats(inode,
				      NFSIOS_FSCACHE_PAGES_WRITTEN_OK, 1);
	}
}