// SPDX-License-Identifier: GPL-2.0-or-later
/* NFS filesystem cache interface
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/in6.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/iversion.h>

#include "internal.h"
#include "iostat.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_FSCACHE

static struct rb_root nfs_fscache_keys = RB_ROOT;
static DEFINE_SPINLOCK(nfs_fscache_keys_lock);

/*
 * Layout of the key for an NFS server cache object.
 */
struct nfs_server_key {
	struct {
		uint16_t	nfsversion;	/* NFS protocol version */
		uint32_t	minorversion;	/* NFSv4 minor version */
		uint16_t	family;		/* address family */
		__be16		port;		/* IP port */
	} hdr;
	union {
		struct in_addr	ipv4_addr;	/* IPv4 address */
		struct in6_addr	ipv6_addr;	/* IPv6 address */
	};
} __packed;

/*
 * Get the per-client index cookie for an NFS client if the appropriate mount
 * flag was set
 * - We always try and get an index cookie for the client, but get filehandle
 *   cookies on a per-superblock basis, depending on the mount flags
 */
void nfs_fscache_get_client_cookie(struct nfs_client *clp)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
	const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;
	struct nfs_server_key key;
	uint16_t len = sizeof(key.hdr);

	memset(&key, 0, sizeof(key));
	key.hdr.nfsversion = clp->rpc_ops->version;
	key.hdr.minorversion = clp->cl_minorversion;
	key.hdr.family = clp->cl_addr.ss_family;

	switch (clp->cl_addr.ss_family) {
	case AF_INET:
		key.hdr.port = sin->sin_port;
		key.ipv4_addr = sin->sin_addr;
		len += sizeof(key.ipv4_addr);
		break;

	case AF_INET6:
		key.hdr.port = sin6->sin6_port;
		key.ipv6_addr = sin6->sin6_addr;
		len += sizeof(key.ipv6_addr);
		break;

	default:
		printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
		       clp->cl_addr.ss_family);
		clp->fscache = NULL;
		return;
	}

	/* create a cache index for looking up filehandles */
	clp->fscache = fscache_acquire_cookie(nfs_fscache_netfs.primary_index,
					      &nfs_fscache_server_index_def,
					      &key, len,
					      NULL, 0,
					      clp, 0, true);
	dfprintk(FSCACHE, "NFS: get client cookie (0x%p/0x%p)\n",
		 clp, clp->fscache);
}

/*
 * Dispose of a per-client cookie
 */
void nfs_fscache_release_client_cookie(struct nfs_client *clp)
{
	dfprintk(FSCACHE, "NFS: releasing client cookie (0x%p/0x%p)\n",
		 clp, clp->fscache);

	fscache_relinquish_cookie(clp->fscache, NULL, false);
	clp->fscache = NULL;
}
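/*
 * A quick sketch of the cookie hierarchy set up by this file: the fscache
 * primary index holds one index cookie per nfs_client, keyed by NFS version,
 * minor version and server address (struct nfs_server_key above); below
 * that, each superblock gets an index cookie keyed by its mount parameters
 * and uniquifier; below that, each regular inode gets a data cookie keyed by
 * its filehandle, carrying mtime/ctime (and the NFSv4 change attribute) as
 * auxiliary coherency data.
 */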
/*
 * Get the cache cookie for an NFS superblock. We have to handle
 * uniquification here because the cache doesn't do it for us.
 *
 * The default uniquifier is just an empty string, but it may be overridden
 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
 * superblock across an automount point of some nature.
 */
void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
	struct nfs_fscache_key *key, *xkey;
	struct nfs_server *nfss = NFS_SB(sb);
	struct rb_node **p, *parent;
	int diff;

	nfss->fscache_key = NULL;
	nfss->fscache = NULL;
	if (!(nfss->options & NFS_OPTION_FSCACHE))
		return;
	if (!uniq) {
		uniq = "";
		ulen = 1;
	}

	key = kzalloc(sizeof(*key) + ulen, GFP_KERNEL);
	if (!key)
		return;

	key->nfs_client = nfss->nfs_client;
	key->key.super.s_flags = sb->s_flags & NFS_SB_MASK;
	key->key.nfs_server.flags = nfss->flags;
	key->key.nfs_server.rsize = nfss->rsize;
	key->key.nfs_server.wsize = nfss->wsize;
	key->key.nfs_server.acregmin = nfss->acregmin;
	key->key.nfs_server.acregmax = nfss->acregmax;
	key->key.nfs_server.acdirmin = nfss->acdirmin;
	key->key.nfs_server.acdirmax = nfss->acdirmax;
	key->key.nfs_server.fsid = nfss->fsid;
	key->key.rpc_auth.au_flavor = nfss->client->cl_auth->au_flavor;

	key->key.uniq_len = ulen;
	memcpy(key->key.uniquifier, uniq, ulen);

	spin_lock(&nfs_fscache_keys_lock);
	p = &nfs_fscache_keys.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct nfs_fscache_key, node);

		if (key->nfs_client < xkey->nfs_client)
			goto go_left;
		if (key->nfs_client > xkey->nfs_client)
			goto go_right;

		diff = memcmp(&key->key, &xkey->key, sizeof(key->key));
		if (diff < 0)
			goto go_left;
		if (diff > 0)
			goto go_right;

		if (key->key.uniq_len == 0)
			goto non_unique;
		diff = memcmp(key->key.uniquifier,
			      xkey->key.uniquifier,
			      key->key.uniq_len);
		if (diff < 0)
			goto go_left;
		if (diff > 0)
			goto go_right;
		goto non_unique;

	go_left:
		p = &(*p)->rb_left;
		continue;
	go_right:
		p = &(*p)->rb_right;
	}

	rb_link_node(&key->node, parent, p);
	rb_insert_color(&key->node, &nfs_fscache_keys);
	spin_unlock(&nfs_fscache_keys_lock);
	nfss->fscache_key = key;

	/* create a cache index for looking up filehandles; only the key
	 * substructure plus the uniquifier forms the index key, so that the
	 * rb_node and nfs_client pointer don't leak into it and change the
	 * key from one mount to the next */
	nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
					       &nfs_fscache_super_index_def,
					       &key->key,
					       sizeof(key->key) + ulen,
					       NULL, 0,
					       nfss, 0, true);
	dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
		 nfss, nfss->fscache);
	return;

non_unique:
	spin_unlock(&nfs_fscache_keys_lock);
	kfree(key);
	nfss->fscache_key = NULL;
	nfss->fscache = NULL;
	printk(KERN_WARNING "NFS:"
	       " Cache request denied due to non-unique superblock keys\n");
}

/*
 * release a per-superblock cookie
 */
void nfs_fscache_release_super_cookie(struct super_block *sb)
{
	struct nfs_server *nfss = NFS_SB(sb);

	dfprintk(FSCACHE, "NFS: releasing superblock cookie (0x%p/0x%p)\n",
		 nfss, nfss->fscache);

	fscache_relinquish_cookie(nfss->fscache, NULL, false);
	nfss->fscache = NULL;

	if (nfss->fscache_key) {
		spin_lock(&nfs_fscache_keys_lock);
		rb_erase(&nfss->fscache_key->node, &nfs_fscache_keys);
		spin_unlock(&nfs_fscache_keys_lock);
		kfree(nfss->fscache_key);
		nfss->fscache_key = NULL;
	}
}
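/*
 * Note on the auxiliary data used below: the mtime/ctime (and, for NFSv4,
 * the change attribute) packed into struct nfs_fscache_inode_auxdata is what
 * the cache compares against its stored copy to decide whether data it
 * already holds for a file is still usable; on a mismatch the cached
 * contents are discarded rather than served.
 */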
/*
 * Initialise the per-inode cache cookie pointer for an NFS inode.
 */
void nfs_fscache_init_inode(struct inode *inode)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_server *nfss = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);

	nfsi->fscache = NULL;
	if (!(nfss->fscache && S_ISREG(inode->i_mode)))
		return;

	memset(&auxdata, 0, sizeof(auxdata));
	auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
	auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
	auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
	auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;

	if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
		auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);

	nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
					       &nfs_fscache_inode_object_def,
					       nfsi->fh.data, nfsi->fh.size,
					       &auxdata, sizeof(auxdata),
					       nfsi, nfsi->vfs_inode.i_size, false);
}

/*
 * Release a per-inode cookie.
 */
void nfs_fscache_clear_inode(struct inode *inode)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);

	memset(&auxdata, 0, sizeof(auxdata));
	auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
	auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
	auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
	auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
	fscache_relinquish_cookie(cookie, &auxdata, false);
	nfsi->fscache = NULL;
}

static bool nfs_fscache_can_enable(void *data)
{
	struct inode *inode = data;

	return !inode_is_open_for_write(inode);
}
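/*
 * nfs_fscache_can_enable() is handed to fscache_enable_cookie() below;
 * fscache calls it back immediately before actually enabling the cookie, so
 * if a writer sneaks in after the check in nfs_fscache_open_file() the
 * enablement is still abandoned and the file stays uncached.
 */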
/*
 * Enable or disable caching for a file that is being opened as appropriate.
 * The cookie is allocated when the inode is initialised, but is not enabled at
 * that time. Enablement is deferred to file-open time to avoid stat() and
 * access() thrashing the cache.
 *
 * For now, with NFS, only regular files that are open read-only will be able
 * to use the cache.
 *
 * We enable the cache for an inode if we open it read-only and it isn't
 * currently open for writing. We disable the cache if the inode is open
 * write-only.
 *
 * The caller uses the file struct to pin i_writecount on the inode before
 * calling us when a file is opened for writing, so we can make use of that.
 *
 * Note that this may be invoked multiple times in parallel by parallel
 * nfs_open() functions.
 */
void nfs_fscache_open_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	if (!fscache_cookie_valid(cookie))
		return;

	memset(&auxdata, 0, sizeof(auxdata));
	auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
	auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
	auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
	auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;

	if (inode_is_open_for_write(inode)) {
		dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
		clear_bit(NFS_INO_FSCACHE, &nfsi->flags);
		fscache_disable_cookie(cookie, &auxdata, true);
		fscache_uncache_all_inode_pages(cookie, inode);
	} else {
		dfprintk(FSCACHE, "NFS: nfsi 0x%p enabling cache\n", nfsi);
		fscache_enable_cookie(cookie, &auxdata, nfsi->vfs_inode.i_size,
				      nfs_fscache_can_enable, inode);
		if (fscache_cookie_enabled(cookie))
			set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
	}
}
EXPORT_SYMBOL_GPL(nfs_fscache_open_file);

/*
 * Release the caching state associated with a page, if the page isn't busy
 * interacting with the cache.
 * - Returns true (can release page) or false (page busy).
 */
int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
	if (PageFsCache(page)) {
		struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host);

		BUG_ON(!cookie);
		dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
			 cookie, page, NFS_I(page->mapping->host));

		if (!fscache_maybe_release_page(cookie, page, gfp))
			return 0;

		nfs_inc_fscache_stats(page->mapping->host,
				      NFSIOS_FSCACHE_PAGES_UNCACHED);
	}

	return 1;
}

/*
 * Release the caching state associated with a page if undergoing complete page
 * invalidation.
 */
void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode)
{
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	BUG_ON(!cookie);

	dfprintk(FSCACHE, "NFS: fscache invalidatepage (0x%p/0x%p/0x%p)\n",
		 cookie, page, NFS_I(inode));

	fscache_wait_on_page_write(cookie, page);

	BUG_ON(!PageLocked(page));
	fscache_uncache_page(cookie, page);
	nfs_inc_fscache_stats(page->mapping->host,
			      NFSIOS_FSCACHE_PAGES_UNCACHED);
}
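/*
 * Read path summary for the functions below: __nfs_readpage_from_fscache()
 * returns 0 if the cache has taken the page (completion then happens
 * asynchronously in nfs_readpage_from_fscache_complete()), 1 if the page
 * isn't in the cache (the caller should read from the server and may then
 * store the result with __nfs_readpage_to_fscache()), or a negative error.
 */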
/*
 * Handle completion of a page being read from the cache.
 * - Called in process (keventd) context.
 */
static void nfs_readpage_from_fscache_complete(struct page *page,
					       void *context,
					       int error)
{
	dfprintk(FSCACHE,
		 "NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
		 page, context, error);

	/* if the read completes with an error, we just unlock the page and let
	 * the VM reissue the readpage */
	if (!error) {
		SetPageUptodate(page);
		unlock_page(page);
	} else {
		error = nfs_readpage_async(context, page->mapping->host, page);
		if (error)
			unlock_page(page);
	}
}

/*
 * Retrieve a page from fscache
 */
int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
				struct inode *inode, struct page *page)
{
	int ret;

	dfprintk(FSCACHE,
		 "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
		 nfs_i_fscache(inode), page, page->index, page->flags, inode);

	ret = fscache_read_or_alloc_page(nfs_i_fscache(inode),
					 page,
					 nfs_readpage_from_fscache_complete,
					 ctx,
					 GFP_KERNEL);

	switch (ret) {
	case 0: /* read BIO submitted (page in fscache) */
		dfprintk(FSCACHE,
			 "NFS: readpage_from_fscache: BIO submitted\n");
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK);
		return ret;

	case -ENOBUFS: /* inode not in cache */
	case -ENODATA: /* page not in cache */
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
		dfprintk(FSCACHE,
			 "NFS: readpage_from_fscache %d\n", ret);
		return 1;

	default:
		dfprintk(FSCACHE, "NFS: readpage_from_fscache %d\n", ret);
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
	}
	return ret;
}

/*
 * Retrieve a set of pages from fscache
 */
int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
				 struct inode *inode,
				 struct address_space *mapping,
				 struct list_head *pages,
				 unsigned *nr_pages)
{
	unsigned npages = *nr_pages;
	int ret;

	dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
		 nfs_i_fscache(inode), npages, inode);

	ret = fscache_read_or_alloc_pages(nfs_i_fscache(inode),
					  mapping, pages, nr_pages,
					  nfs_readpage_from_fscache_complete,
					  ctx,
					  mapping_gfp_mask(mapping));
	if (*nr_pages < npages)
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK,
				      npages);
	if (*nr_pages > 0)
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL,
				      *nr_pages);

	switch (ret) {
	case 0: /* read submitted to the cache for all pages */
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: submitted\n");

		return ret;

	case -ENOBUFS: /* some pages aren't cached and can't be */
	case -ENODATA: /* some pages aren't cached */
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: no page: %d\n", ret);
		return 1;

	default:
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: ret %d\n", ret);
	}

	return ret;
}
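/*
 * Note on the readpages accounting above: fscache_read_or_alloc_pages()
 * takes the pages it accepts off the @pages list and lowers *nr_pages to the
 * number remaining; whatever is left on the list falls back to being read
 * from the server and is counted as a cache miss.
 */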
/*
 * Store a newly fetched page in fscache
 * - PG_fscache must be set on the page
 */
void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
{
	int ret;

	dfprintk(FSCACHE,
		 "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx)/%d)\n",
		 nfs_i_fscache(inode), page, page->index, page->flags, sync);

	ret = fscache_write_page(nfs_i_fscache(inode), page,
				 inode->i_size, GFP_KERNEL);
	dfprintk(FSCACHE,
		 "NFS: readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n",
		 page, page->index, page->flags, ret);

	if (ret != 0) {
		fscache_uncache_page(nfs_i_fscache(inode), page);
		nfs_inc_fscache_stats(inode,
				      NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL);
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED);
	} else {
		nfs_inc_fscache_stats(inode,
				      NFSIOS_FSCACHE_PAGES_WRITTEN_OK);
	}
}
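/*
 * Rough lifecycle of these hooks for a mount made with the 'fsc' option (a
 * summary only; see the callers in the rest of fs/nfs for the exact points):
 *
 *	mount		-> nfs_fscache_get_client_cookie(),
 *			   nfs_fscache_get_super_cookie()
 *	inode setup	-> nfs_fscache_init_inode()
 *	open		-> nfs_fscache_open_file()
 *	readpage(s)	-> __nfs_readpage_from_fscache() /
 *			   __nfs_readpages_from_fscache(), with a miss falling
 *			   back to the server and __nfs_readpage_to_fscache()
 *	inode eviction	-> nfs_fscache_clear_inode()
 *	umount		-> nfs_fscache_release_super_cookie(),
 *			   nfs_fscache_release_client_cookie()
 */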