/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>

#include "internal.h"

static struct kmem_cache *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page *req;

	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	req->wb_lock_context = nfs_get_lock_context(ctx);
	if (req->wb_lock_context == NULL) {
		nfs_page_free(req);
		return ERR_PTR(-ENOMEM);
	}

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index	= page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}
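
/*
 * Example: a minimal sketch of how a caller might create and release a
 * request covering a whole page. Illustrative only; the surrounding
 * context and the nfs_do_something_with() helper are hypothetical.
 *
 *	struct nfs_page *req;
 *
 *	lock_page(page);			// caller must hold the page lock
 *	req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
 *	if (IS_ERR(req)) {
 *		unlock_page(page);
 *		return PTR_ERR(req);		// typically -ENOMEM
 *	}
 *	nfs_do_something_with(req);		// hypothetical per-request work
 *	nfs_release_request(req);		// drop the kref taken at creation
 */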

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
	nfs_release_request(req);
}

/**
 * nfs_set_page_tag_locked - Tag a request as locked
 * @req: request to lock and tag
 */
int nfs_set_page_tag_locked(struct nfs_page *req)
{
	if (!nfs_lock_request_dontget(req))
		return 0;
	if (test_bit(PG_MAPPED, &req->wb_flags))
		radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree,
				   req->wb_index, NFS_PAGE_TAG_LOCKED);
	return 1;
}

/**
 * nfs_clear_page_tag_locked - Clear request tag and wake up sleepers
 * @req: request to unlock
 */
void nfs_clear_page_tag_locked(struct nfs_page *req)
{
	if (test_bit(PG_MAPPED, &req->wb_flags)) {
		struct inode *inode = req->wb_context->path.dentry->d_inode;
		struct nfs_inode *nfsi = NFS_I(inode);

		spin_lock(&inode->i_lock);
		radix_tree_tag_clear(&nfsi->nfs_page_tree,
				     req->wb_index, NFS_PAGE_TAG_LOCKED);
		nfs_unlock_request(req);
		spin_unlock(&inode->i_lock);
	} else
		nfs_unlock_request(req);
}

/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}


/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}
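
/*
 * Example: the intended lifetime pattern around waiting on a request.
 * Per the nfs_wait_on_request() kerneldoc below, the waiter must hold
 * its own count so the request cannot be freed underneath it. A
 * minimal sketch, not taken from a real caller.
 *
 *	kref_get(&req->wb_kref);		// pin the request first
 *	nfs_wait_on_request(req);		// sleep until PG_BUSY clears
 *	nfs_release_request(req);		// drop our pin
 */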

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * The wait is uninterruptible (TASK_UNINTERRUPTIBLE).
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_uninterruptible,
			TASK_UNINTERRUPTIBLE);
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @doio: pointer to io function
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_inode = inode;
	desc->pg_doio = doio;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static int nfs_can_coalesce_requests(struct nfs_page *prev,
				     struct nfs_page *req)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return 0;
	if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
		return 0;
	if (req->wb_context->state != prev->wb_context->state)
		return 0;
	if (req->wb_index != (prev->wb_index + 1))
		return 0;
	if (req->wb_pgbase != 0)
		return 0;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return 0;
	return 1;
}
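
/*
 * Worked example of the contiguity checks above, assuming 4096-byte
 * pages. Given
 *	prev = { wb_index = 7, wb_pgbase = 0, wb_bytes = 4096 }
 *	req  = { wb_index = 8, wb_pgbase = 0, wb_bytes = 4096 }
 * and matching credentials, open state and lockowner,
 * nfs_can_coalesce_requests() returns 1: prev ends exactly on a page
 * boundary (0 + 4096 == PAGE_CACHE_SIZE) and req starts at offset 0 of
 * the very next page index. Any hole in the byte range - a non-zero
 * req->wb_pgbase, a short prev, or a skipped index - returns 0.
 */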

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	size_t newlen = req->wb_bytes;

	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		/*
		 * FIXME: ideally we should be able to coalesce all requests
		 * that are not block boundary aligned, but currently this
		 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
		 * since nfs_flush_multi and nfs_pagein_multi assume you
		 * can have only one struct nfs_page.
		 */
		if (desc->pg_bsize < PAGE_SIZE)
			return 0;
		newlen += desc->pg_count;
		if (newlen > desc->pg_bsize)
			return 0;
		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req))
			return 0;
	} else
		desc->pg_base = req->wb_pgbase;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count = newlen;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_doio(desc->pg_inode,
					  &desc->pg_list,
					  nfs_page_array_len(desc->pg_base,
							     desc->pg_count),
					  desc->pg_count,
					  desc->pg_ioflags);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
	}
	return 1;
}

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	nfs_pageio_doio(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_doio(desc);
	}
}
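
/*
 * Example: typical descriptor usage as seen from a caller. A minimal
 * sketch only; my_doio() is a hypothetical stand-in for an io function
 * matching the nfs_pageio_init() prototype, and the request list is
 * assumed to have been built up elsewhere.
 *
 *	struct nfs_pageio_descriptor desc;
 *	struct nfs_page *req, *tmp;
 *
 *	nfs_pageio_init(&desc, inode, my_doio, NFS_SERVER(inode)->rsize, 0);
 *	list_for_each_entry_safe(req, tmp, &head, wb_list) {
 *		if (!nfs_pageio_add_request(&desc, req))
 *			break;			// desc.pg_error holds the error
 *	}
 *	nfs_pageio_complete(&desc);		// flush whatever is still queued
 */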

#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 * @tag: tag to scan for
 *
 * Moves elements from one of the inode request lists.
 * If @npages is 0, the entire address_space starting at index
 * @idx_start is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's i_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi,
		  struct list_head *dst, pgoff_t idx_start,
		  unsigned int npages, int tag)
{
	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
	struct nfs_page *req;
	pgoff_t idx_end;
	int found, i;
	int res;

	res = 0;
	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	for (;;) {
		found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
				(void **)&pgvec[0], idx_start,
				NFS_SCAN_MAXENTRIES, tag);
		if (found <= 0)
			break;
		for (i = 0; i < found; i++) {
			req = pgvec[i];
			if (req->wb_index > idx_end)
				goto out;
			idx_start = req->wb_index + 1;
			if (nfs_set_page_tag_locked(req)) {
				kref_get(&req->wb_kref);
				nfs_list_remove_request(req);
				radix_tree_tag_clear(&nfsi->nfs_page_tree,
						req->wb_index, tag);
				nfs_list_add_request(req, dst);
				res++;
				if (res == INT_MAX)
					goto out;
			}
		}
		/* for latency reduction */
		cond_resched_lock(&nfsi->vfs_inode.i_lock);
	}
out:
	return res;
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}
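
/*
 * Example: a sketch of draining tagged requests with nfs_scan_list().
 * Illustrative only; process_req() is hypothetical, and the choice of
 * NFS_PAGE_TAG_COMMIT as the tag is an assumption about the caller.
 * The i_lock must be held across the scan, per the kerneldoc above;
 * each returned request is tag-locked and referenced, so the caller
 * unlocks and releases it when done.
 *
 *	LIST_HEAD(head);
 *
 *	spin_lock(&inode->i_lock);
 *	nfs_scan_list(NFS_I(inode), &head, 0, 0, NFS_PAGE_TAG_COMMIT);
 *	spin_unlock(&inode->i_lock);
 *	while (!list_empty(&head)) {
 *		struct nfs_page *req = nfs_list_entry(head.next);
 *
 *		nfs_list_remove_request(req);
 *		process_req(req);		// hypothetical per-request work
 *		nfs_clear_page_tag_locked(req);	// wake up sleepers
 *		nfs_release_request(req);	// drop the scan's reference
 *	}
 */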