// SPDX-License-Identifier: GPL-2.0-or-later
/* NFS filesystem cache interface
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/in6.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/xarray.h>
#include <linux/fscache.h>
#include <linux/netfs.h>

#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "nfstrace.h"

#define NFS_MAX_KEY_LEN 1000

static bool nfs_append_int(char *key, int *_len, unsigned long long x)
{
	if (*_len > NFS_MAX_KEY_LEN)
		return false;
	if (x == 0)
		key[(*_len)++] = ',';
	else
		*_len += sprintf(key + *_len, ",%llx", x);
	return true;
}

/*
 * Get the per-client index cookie for an NFS client if the appropriate mount
 * flag was set
 * - We always try and get an index cookie for the client, but get filehandle
 *   cookies on a per-superblock basis, depending on the mount flags
 */
static bool nfs_fscache_get_client_key(struct nfs_client *clp,
				       char *key, int *_len)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
	const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;

	*_len += snprintf(key + *_len, NFS_MAX_KEY_LEN - *_len,
			  ",%u.%u,%x",
			  clp->rpc_ops->version,
			  clp->cl_minorversion,
			  clp->cl_addr.ss_family);

	switch (clp->cl_addr.ss_family) {
	case AF_INET:
		if (!nfs_append_int(key, _len, sin->sin_port) ||
		    !nfs_append_int(key, _len, sin->sin_addr.s_addr))
			return false;
		return true;

	case AF_INET6:
		if (!nfs_append_int(key, _len, sin6->sin6_port) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[0]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[1]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[2]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[3]))
			return false;
		return true;

	default:
		printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
		       clp->cl_addr.ss_family);
		return false;
	}
}
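/*
 * Illustration of the key format (sketch, not normative): for an NFSv4.1
 * client reached over IPv4, the caller's "nfs" prefix plus the client key
 * built above has the form
 *
 *	"nfs,4.1,2,<port>,<addr>"
 *
 * where 2 is AF_INET, <port> and <addr> are the raw network-byte-order
 * values rendered in hex by nfs_append_int(), and any zero value collapses
 * to a bare ','.
 */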
/*
 * Get the cache cookie for an NFS superblock.
 *
 * The default uniquifier is just an empty string, but it may be overridden
 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
 * superblock across an automount point of some nature.
 */
int nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
	struct fscache_volume *vcookie;
	struct nfs_server *nfss = NFS_SB(sb);
	unsigned int len = 3;
	char *key;

	if (uniq) {
		nfss->fscache_uniq = kmemdup_nul(uniq, ulen, GFP_KERNEL);
		if (!nfss->fscache_uniq)
			return -ENOMEM;
	}

	key = kmalloc(NFS_MAX_KEY_LEN + 24, GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	memcpy(key, "nfs", 3);
	if (!nfs_fscache_get_client_key(nfss->nfs_client, key, &len) ||
	    !nfs_append_int(key, &len, nfss->fsid.major) ||
	    !nfs_append_int(key, &len, nfss->fsid.minor) ||
	    !nfs_append_int(key, &len, sb->s_flags & NFS_SB_MASK) ||
	    !nfs_append_int(key, &len, nfss->flags) ||
	    !nfs_append_int(key, &len, nfss->rsize) ||
	    !nfs_append_int(key, &len, nfss->wsize) ||
	    !nfs_append_int(key, &len, nfss->acregmin) ||
	    !nfs_append_int(key, &len, nfss->acregmax) ||
	    !nfs_append_int(key, &len, nfss->acdirmin) ||
	    !nfs_append_int(key, &len, nfss->acdirmax) ||
	    !nfs_append_int(key, &len, nfss->client->cl_auth->au_flavor))
		goto out;

	if (ulen > 0) {
		if (ulen > NFS_MAX_KEY_LEN - len)
			goto out;
		key[len++] = ',';
		memcpy(key + len, uniq, ulen);
		len += ulen;
	}
	key[len] = 0;

	/* create a cache index for looking up filehandles */
	vcookie = fscache_acquire_volume(key,
					 NULL, /* preferred_cache */
					 NULL, 0 /* coherency_data */);
	if (IS_ERR(vcookie)) {
		if (vcookie != ERR_PTR(-EBUSY)) {
			kfree(key);
			return PTR_ERR(vcookie);
		}
		pr_err("NFS: Cache volume key already in use (%s)\n", key);
		vcookie = NULL;
	}
	nfss->fscache = vcookie;

out:
	kfree(key);
	return 0;
}

/*
 * release a per-superblock cookie
 */
void nfs_fscache_release_super_cookie(struct super_block *sb)
{
	struct nfs_server *nfss = NFS_SB(sb);

	fscache_relinquish_volume(nfss->fscache, NULL, false);
	nfss->fscache = NULL;
	kfree(nfss->fscache_uniq);
}

/*
 * Initialise the per-inode cache cookie pointer for an NFS inode.
 */
void nfs_fscache_init_inode(struct inode *inode)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_server *nfss = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);

	netfs_inode(inode)->cache = NULL;
	if (!(nfss->fscache && S_ISREG(inode->i_mode)))
		return;

	nfs_fscache_update_auxdata(&auxdata, inode);

	netfs_inode(inode)->cache = fscache_acquire_cookie(
					nfss->fscache,
					0,
					nfsi->fh.data,	/* index_key */
					nfsi->fh.size,
					&auxdata,	/* aux_data */
					sizeof(auxdata),
					i_size_read(inode));
}

/*
 * Release a per-inode cookie.
 */
void nfs_fscache_clear_inode(struct inode *inode)
{
	fscache_relinquish_cookie(netfs_i_cookie(netfs_inode(inode)), false);
	netfs_inode(inode)->cache = NULL;
}
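/*
 * Sketch of the per-inode cookie lifecycle implied by the acquire/use/
 * unuse/relinquish pairing above and below (the actual call sites live
 * elsewhere in fs/nfs/, so the ordering shown is illustrative):
 *
 *	nfs_fscache_init_inode(inode);		// inode instantiation
 *	nfs_fscache_open_file(inode, filp);	// file open: use, maybe invalidate
 *	nfs_fscache_release_file(inode, filp);	// file release: unuse, update aux
 *	nfs_fscache_clear_inode(inode);		// inode eviction
 */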
/*
 * Enable or disable caching for a file that is being opened as appropriate.
 * The cookie is allocated when the inode is initialised, but is not enabled
 * at that time.  Enablement is deferred to file-open time to avoid stat() and
 * access() thrashing the cache.
 *
 * For now, with NFS, only regular files that are open read-only will be able
 * to use the cache.
 *
 * We enable the cache for an inode if we open it read-only and it isn't
 * currently open for writing.  We disable the cache if the inode is open
 * write-only.
 *
 * The caller uses the file struct to pin i_writecount on the inode before
 * calling us when a file is opened for writing, so we can make use of that.
 *
 * Note that this may be invoked multiple times in parallel by parallel
 * nfs_open() functions.
 */
void nfs_fscache_open_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
	bool open_for_write = inode_is_open_for_write(inode);

	if (!fscache_cookie_valid(cookie))
		return;

	fscache_use_cookie(cookie, open_for_write);
	if (open_for_write) {
		nfs_fscache_update_auxdata(&auxdata, inode);
		fscache_invalidate(cookie, &auxdata, i_size_read(inode),
				   FSCACHE_INVAL_DIO_WRITE);
	}
}
EXPORT_SYMBOL_GPL(nfs_fscache_open_file);

void nfs_fscache_release_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
	loff_t i_size = i_size_read(inode);

	nfs_fscache_update_auxdata(&auxdata, inode);
	fscache_unuse_cookie(cookie, &auxdata, &i_size);
}

int nfs_netfs_read_folio(struct file *file, struct folio *folio)
{
	if (!netfs_inode(folio_inode(folio))->cache)
		return -ENOBUFS;

	return netfs_read_folio(file, folio);
}

int nfs_netfs_readahead(struct readahead_control *ractl)
{
	struct inode *inode = ractl->mapping->host;

	if (!netfs_inode(inode)->cache)
		return -ENOBUFS;

	netfs_readahead(ractl);
	return 0;
}

static atomic_t nfs_netfs_debug_id;
static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
	rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);

	return 0;
}

static void nfs_netfs_free_request(struct netfs_io_request *rreq)
{
	put_nfs_open_context(rreq->netfs_priv);
}

static inline int nfs_netfs_begin_cache_operation(struct netfs_io_request *rreq)
{
	return fscache_begin_read_operation(&rreq->cache_resources,
					    netfs_i_cookie(netfs_inode(rreq->inode)));
}

static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
{
	struct nfs_netfs_io_data *netfs;

	netfs = kzalloc(sizeof(*netfs), GFP_KERNEL_ACCOUNT);
	if (!netfs)
		return NULL;
	netfs->sreq = sreq;
	refcount_set(&netfs->refcount, 1);
	return netfs;
}

static bool nfs_netfs_clamp_length(struct netfs_io_subrequest *sreq)
{
	size_t rsize = NFS_SB(sreq->rreq->inode->i_sb)->rsize;

	sreq->len = min(sreq->len, rsize);
	return true;
}

static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
{
	struct nfs_netfs_io_data *netfs;
	struct nfs_pageio_descriptor pgio;
	struct inode *inode = sreq->rreq->inode;
	struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
	struct page *page;
	int err;
	pgoff_t start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
	pgoff_t last = ((sreq->start + sreq->len -
			 sreq->transferred - 1) >> PAGE_SHIFT);
	XA_STATE(xas, &sreq->rreq->mapping->i_pages, start);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	netfs = nfs_netfs_alloc(sreq);
	if (!netfs)
		return netfs_subreq_terminated(sreq, -ENOMEM, false);

	pgio.pg_netfs = netfs; /* used in completion */
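	/*
	 * Walk the pagecache pages backing this subrequest.  The xarray
	 * lock can't be held across nfs_read_add_folio() because that call
	 * may sleep, so the iteration is paused and the lock dropped around
	 * each call.
	 */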
	xas_lock(&xas);
	xas_for_each(&xas, page, last) {
		/* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs */
		xas_pause(&xas);
		xas_unlock(&xas);
		err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
		if (err < 0) {
			netfs->error = err;
			goto out;
		}
		xas_lock(&xas);
	}
	xas_unlock(&xas);
out:
	nfs_pageio_complete_read(&pgio);
	nfs_netfs_put(netfs);
}

void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr)
{
	struct nfs_netfs_io_data *netfs = hdr->netfs;

	if (!netfs)
		return;

	nfs_netfs_get(netfs);
}

int nfs_netfs_folio_unlock(struct folio *folio)
{
	struct inode *inode = folio_file_mapping(folio)->host;

	/*
	 * If fscache is enabled, netfs will unlock pages.
	 */
	if (netfs_inode(inode)->cache)
		return 0;

	return 1;
}

void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_netfs_io_data *netfs = hdr->netfs;
	struct netfs_io_subrequest *sreq;

	if (!netfs)
		return;

	sreq = netfs->sreq;
	if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);

	if (hdr->error)
		netfs->error = hdr->error;
	else
		atomic64_add(hdr->res.count, &netfs->transferred);

	nfs_netfs_put(netfs);
	hdr->netfs = NULL;
}

const struct netfs_request_ops nfs_netfs_ops = {
	.init_request		= nfs_netfs_init_request,
	.free_request		= nfs_netfs_free_request,
	.begin_cache_operation	= nfs_netfs_begin_cache_operation,
	.issue_read		= nfs_netfs_issue_read,
	.clamp_length		= nfs_netfs_clamp_length,
};
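/*
 * Read-path reference counting, as a sketch (nfs_netfs_get() and
 * nfs_netfs_put() are assumed to be the refcount helpers declared in
 * fscache.h): nfs_netfs_alloc() starts the count at 1, held for the
 * duration of nfs_netfs_issue_read(); nfs_netfs_initiate_read() takes a
 * reference per RPC issued and nfs_netfs_read_completion() drops one per
 * RPC completed.  Dropping the last reference is what should report the
 * accumulated ->transferred or ->error back to netfs for the subrequest.
 */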