/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>

#include "internal.h"

static struct kmem_cache *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p;
	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->wb_list);
	}
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_page *req;

	for (;;) {
		/* try to allocate the request struct */
		req = nfs_page_alloc();
		if (req != NULL)
			break;

		if (signalled() && (server->flags & NFS_MOUNT_INTR))
			return ERR_PTR(-ERESTARTSYS);
		yield();
	}

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * nfs_update_request() if the region is not locked. */
	req->wb_page	= page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index	= page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset	= offset;
	req->wb_pgbase	= offset;
	req->wb_bytes	= count;
	atomic_set(&req->wb_count, 1);
	req->wb_context = get_nfs_open_context(ctx);

	return req;
}
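
/*
 * Illustrative sketch, not part of the original file: how a caller
 * might drive nfs_create_request() for a locked page. The helper
 * name below is hypothetical; the queueing step is elided.
 */
static struct nfs_page *example_new_request(struct nfs_open_context *ctx,
					    struct inode *inode,
					    struct page *page,
					    unsigned int offset,
					    unsigned int count)
{
	struct nfs_page *req;

	/* The page lock must already be held by the caller. */
	req = nfs_create_request(ctx, inode, page, offset, count);
	if (IS_ERR(req))
		return req;	/* -ERESTARTSYS on an intr mount */
	/* ... add req to an I/O list; the initial reference is
	 * dropped later via nfs_release_request() ... */
	return req;
}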
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
	nfs_release_request(req);
}

/**
 * nfs_set_page_writeback_locked - Lock a request for writeback
 * @req: pointer to request
 */
int nfs_set_page_writeback_locked(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

	if (!nfs_lock_request(req))
		return 0;
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
	return 1;
}

/**
 * nfs_clear_page_writeback - Unlock request and wake up sleepers
 * @req: pointer to request
 */
void nfs_clear_page_writeback(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

	if (req->wb_page != NULL) {
		spin_lock(&nfsi->req_lock);
		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
		spin_unlock(&nfsi->req_lock);
	}
	nfs_unlock_request(req);
}

/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page resources associated with a write request after it
 * has completed.
 */
void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
}


/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void
nfs_release_request(struct nfs_page *req)
{
	if (!atomic_dec_and_test(&req->wb_count))
		return;

	/* Release struct file or cached credential */
	nfs_clear_request(req);
	put_nfs_open_context(req->wb_context);
	nfs_page_free(req);
}
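
/*
 * Illustrative sketch, not part of the original file: the typical
 * lock/tag/unlock cycle for a request being written back. The
 * helper name is hypothetical; taking req_lock around the tagging
 * call matches the "_locked" suffix above.
 */
static void example_writeback_cycle(struct nfs_inode *nfsi,
				    struct nfs_page *req)
{
	spin_lock(&nfsi->req_lock);
	if (!nfs_set_page_writeback_locked(req)) {
		/* PG_BUSY already held by someone else */
		spin_unlock(&nfsi->req_lock);
		return;
	}
	spin_unlock(&nfsi->req_lock);

	/* ... issue the RPC for this request ... */

	/* Clears the radix tree tag, drops PG_BUSY, wakes waiters,
	 * and releases one reference via nfs_unlock_request(). */
	nfs_clear_page_writeback(req);
}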
static int nfs_wait_bit_interruptible(void *word)
{
	int ret = 0;

	if (signal_pending(current))
		ret = -ERESTARTSYS;
	else
		schedule();
	return ret;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by signals only if mounted with intr flag.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->dentry->d_inode);
	sigset_t oldmask;
	int ret = 0;

	if (!test_bit(PG_BUSY, &req->wb_flags))
		goto out;
	/*
	 * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
	 *	 are not interrupted if intr flag is not set
	 */
	rpc_clnt_sigmask(clnt, &oldmask);
	ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
	rpc_clnt_sigunmask(clnt, &oldmask);
out:
	return ret;
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @doio: pointer to io function
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_inode = inode;
	desc->pg_doio = doio;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static int nfs_can_coalesce_requests(struct nfs_page *prev,
				     struct nfs_page *req)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return 0;
	if (req->wb_context->lockowner != prev->wb_context->lockowner)
		return 0;
	if (req->wb_context->state != prev->wb_context->state)
		return 0;
	if (req->wb_index != (prev->wb_index + 1))
		return 0;
	if (req->wb_pgbase != 0)
		return 0;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return 0;
	return 1;
}
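
/*
 * Worked example, not part of the original file (assumes
 * PAGE_CACHE_SIZE == 4096): 'prev' covering all of page index 5 and
 * 'req' covering all of page index 6 pass every test above, so they
 * may share one RPC. Any hole, non-zero wb_pgbase, partial tail on
 * 'prev', or change of credential/open state/lockowner defeats it.
 */
static int example_can_merge_full_pages(struct nfs_page *prev,
					struct nfs_page *req)
{
	/* prev: wb_index == 5, wb_pgbase == 0, wb_bytes == 4096
	 * req:  wb_index == 6, wb_pgbase == 0, wb_bytes == 4096 */
	return nfs_can_coalesce_requests(prev, req);
}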
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	size_t newlen = req->wb_bytes;

	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		/*
		 * FIXME: ideally we should be able to coalesce all requests
		 * that are not block boundary aligned, but currently this
		 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
		 * since nfs_flush_multi and nfs_pagein_multi assume you
		 * can have only one struct nfs_page.
		 */
		if (desc->pg_bsize < PAGE_SIZE)
			return 0;
		newlen += desc->pg_count;
		if (newlen > desc->pg_bsize)
			return 0;
		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req))
			return 0;
	} else
		desc->pg_base = req->wb_pgbase;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count = newlen;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_doio(desc->pg_inode,
					  &desc->pg_list,
					  nfs_page_array_len(desc->pg_base,
							     desc->pg_count),
					  desc->pg_count,
					  desc->pg_ioflags);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * If the request cannot be coalesced, the existing list is flushed
 * via nfs_pageio_doio() and the attempt is retried.
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'; a return of zero indicates an I/O error.
 */
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
	}
	return 1;
}

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	nfs_pageio_doio(desc);
}
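
/*
 * Illustrative sketch, not part of the original file: how a read or
 * write path might drive the descriptor. 'example_doio' stands in
 * for a real handler such as the flush/pagein routines; the request
 * list 'head' is assumed to be populated by the caller.
 */
static void example_pageio(struct inode *inode, struct list_head *head,
			   int (*example_doio)(struct inode *,
					       struct list_head *,
					       unsigned int, size_t, int),
			   size_t bsize)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req;

	nfs_pageio_init(&desc, inode, example_doio, bsize, 0);
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		/* flushes and retries internally; 0 means I/O error */
		if (!nfs_pageio_add_request(&desc, req))
			break;
	}
	nfs_pageio_complete(&desc);	/* submit whatever remains */
}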
#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @head: One of the NFS inode request lists
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves elements from one of the inode request lists.
 * If @npages is 0, the entire address_space starting at index
 * @idx_start is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's req_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head,
		  struct list_head *dst, pgoff_t idx_start,
		  unsigned int npages)
{
	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
	struct nfs_page *req;
	pgoff_t idx_end;
	int found, i;
	int res;

	res = 0;
	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	for (;;) {
		found = radix_tree_gang_lookup(&nfsi->nfs_page_tree,
					       (void **)&pgvec[0], idx_start,
					       NFS_SCAN_MAXENTRIES);
		if (found <= 0)
			break;
		for (i = 0; i < found; i++) {
			req = pgvec[i];
			if (req->wb_index > idx_end)
				goto out;
			idx_start = req->wb_index + 1;
			if (req->wb_list_head != head)
				continue;
			if (nfs_set_page_writeback_locked(req)) {
				nfs_list_remove_request(req);
				nfs_list_add_request(req, dst);
				res++;
			}
		}
	}
out:
	return res;
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL, NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}
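
/*
 * Illustrative sketch, not part of the original file: gathering
 * requests for a range under the req_lock, roughly as a commit path
 * might. The 'commit' list name and the range are assumptions.
 */
static int example_scan(struct nfs_inode *nfsi, struct list_head *dst,
			pgoff_t idx_start, unsigned int npages)
{
	int moved;

	spin_lock(&nfsi->req_lock);
	/* Each moved request is locked and tagged for writeback. */
	moved = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	return moved;
}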