/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct list_head	rewrite_list;	/* saved nfs_write_data structs */
	struct nfs_write_data *	commit_data;	/* special write_data for commits */
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static const struct rpc_call_ops nfs_write_direct_ops;

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}
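
/*
 * Reference counting for outstanding I/O: the schedule routines below hold
 * one io_count reference across their dispatch loop and take another for
 * each RPC they send.  Every completion callback drops a reference, and
 * whichever path brings io_count to zero completes the nfs_direct_req.
 */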
static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			iocb->ki_filp->f_path.dentry->d_name.name,
			(long long) pos, nr_segs);

	return -EINVAL;
}

static void nfs_direct_dirty_pages(struct page **pages, int npages)
{
	int i;
	for (i = 0; i < npages; i++) {
		struct page *page = pages[i];
		if (!PageCompound(page))
			set_page_dirty_lock(page);
	}
}

static void nfs_direct_release_pages(struct page **pages, int npages)
{
	int i;
	for (i = 0; i < npages; i++)
		page_cache_release(pages[i]);
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->rewrite_list);
	dreq->iocb = NULL;
	dreq->ctx = NULL;
	spin_lock_init(&dreq->lock);
	atomic_set(&dreq->io_count, 0);
	dreq->count = 0;
	dreq->error = 0;
	dreq->flags = 0;

	return dreq;
}

static void nfs_direct_req_release(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_interruptible(&dreq->completion);

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	kref_put(&dreq->kref, nfs_direct_req_release);
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	}
	complete_all(&dreq->completion);

	kref_put(&dreq->kref, nfs_direct_req_release);
}

/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 */
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	if (nfs_readpage_result(task, data) != 0)
		return;

	nfs_direct_dirty_pages(data->pagevec, data->npages);
	nfs_direct_release_pages(data->pagevec, data->npages);

	spin_lock(&dreq->lock);

	if (likely(task->tk_status >= 0))
		dreq->count += data->res.count;
	else
		dreq->error = task->tk_status;

	spin_unlock(&dreq->lock);

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
}

static const struct rpc_call_ops nfs_read_direct_ops = {
	.rpc_call_done = nfs_direct_read_result,
	.rpc_release = nfs_readdata_release,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
{
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;

	get_dreq(dreq);

	do {
		struct nfs_read_data *data;
		size_t bytes;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(rsize, count);

		result = -ENOMEM;
		data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes));
		if (unlikely(!data))
			break;

		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					data->npages, 1, 0, data->pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (unlikely(result < data->npages)) {
			if (result > 0)
				nfs_direct_release_pages(data->pagevec, result);
			nfs_readdata_release(data);
			break;
		}

		get_dreq(dreq);

		data->req = (struct nfs_page *) dreq;
		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = data->pagevec;
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.eof = 0;
		data->res.count = bytes;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_read_direct_ops, data);
		NFS_PROTO(inode)->read_setup(data);

		data->task.tk_cookie = (unsigned long) inode;

		rpc_execute(&data->task);

		dprintk("NFS: %5u initiated direct read call "
			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		started += bytes;
		user_addr += bytes;
		pos += bytes;
		/* FIXME: Remove this unnecessary math from final patch */
		pgbase += bytes;
		pgbase &= ~PAGE_MASK;
		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));

		count -= bytes;
	} while (count != 0);

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);

	if (started)
		return 0;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
{
	ssize_t result = 0;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return -ENOMEM;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
	rpc_clnt_sigmask(clnt, &oldset);
	result = nfs_direct_read_schedule(dreq, user_addr, count, pos);
	if (!result)
		result = nfs_direct_wait(dreq);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}

static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
{
	while (!list_empty(&dreq->rewrite_list)) {
		struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_direct_release_pages(data->pagevec, data->npages);
		nfs_writedata_release(data);
	}
}
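
/*
 * NFSv3 and NFSv4 can answer a WRITE with an UNSTABLE commitment, which
 * then requires a COMMIT (and possibly a stable re-send if the server's
 * write verifier changes).  The helpers inside this #if implement that
 * machinery; when only NFSv2 is configured, every write is already stable
 * on the server and the simpler completion path after the #else is used.
 */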
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct inode *inode = dreq->inode;
	struct list_head *p;
	struct nfs_write_data *data;

	dreq->count = 0;
	get_dreq(dreq);

	list_for_each(p, &dreq->rewrite_list) {
		data = list_entry(p, struct nfs_write_data, pages);

		get_dreq(dreq);

		/*
		 * Reset data->res.
		 */
		nfs_fattr_init(&data->fattr);
		data->res.count = data->args.count;
		memset(&data->verf, 0, sizeof(data->verf));

		/*
		 * Reuse data->task; data->args should not have changed
		 * since the original request was sent.
		 */
		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_write_direct_ops, data);
		NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);

		data->task.tk_priority = RPC_PRIORITY_NORMAL;
		data->task.tk_cookie = (unsigned long) inode;

		/*
		 * We're called via an RPC callback, so BKL is already held.
		 */
		rpc_execute(&data->task);

		dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				data->args.count,
				(unsigned long long)data->args.offset);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, inode);
}

static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;
	if (unlikely(task->tk_status < 0)) {
		dprintk("NFS: %5u commit failed with error %d.\n",
				task->tk_pid, task->tk_status);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		dprintk("NFS: %5u commit verify failed\n", task->tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", task->tk_pid, task->tk_status);
	nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_commit_direct_ops = {
	.rpc_call_done = nfs_direct_commit_result,
	.rpc_release = nfs_commit_release,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	struct nfs_write_data *data = dreq->commit_data;

	data->inode = dreq->inode;
	data->cred = dreq->ctx->cred;

	data->args.fh = NFS_FH(data->inode);
	data->args.offset = 0;
	data->args.count = 0;
	data->res.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;

	rpc_init_task(&data->task, NFS_CLIENT(dreq->inode), RPC_TASK_ASYNC,
				&nfs_commit_direct_ops, data);
	NFS_PROTO(data->inode)->commit_setup(data, 0);

	data->task.tk_priority = RPC_PRIORITY_NORMAL;
	data->task.tk_cookie = (unsigned long)data->inode;
	/* Note: task.tk_ops->rpc_release will free dreq->commit_data */
	dreq->commit_data = NULL;

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

	rpc_execute(&data->task);
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_end_data_update(inode);
			if (dreq->commit_data != NULL)
				nfs_commit_free(dreq->commit_data);
			nfs_direct_free_writedata(dreq);
			nfs_zap_mapping(inode, inode->i_mapping);
			nfs_direct_complete(dreq);
	}
}

static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
	dreq->commit_data = nfs_commit_alloc();
	if (dreq->commit_data != NULL)
		dreq->commit_data->req = (struct nfs_page *) dreq;
}
#else
static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
	dreq->commit_data = NULL;
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	nfs_end_data_update(inode);
	nfs_direct_free_writedata(dreq);
	nfs_zap_mapping(inode, inode->i_mapping);
	nfs_direct_complete(dreq);
}
#endif
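
/*
 * Each WRITE reply adds its byte count to dreq->count.  A reply that is
 * not NFS_FILE_SYNC records the server's write verifier and arms a COMMIT
 * (NFS_ODIRECT_DO_COMMIT); if a later reply, or the COMMIT itself, carries
 * a different verifier, the saved requests are re-sent as stable writes
 * (NFS_ODIRECT_RESCHED_WRITES).
 */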
static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
	int status = task->tk_status;

	if (nfs_writeback_done(task, data) != 0)
		return;

	spin_lock(&dreq->lock);

	if (unlikely(dreq->error != 0))
		goto out_unlock;
	if (unlikely(status < 0)) {
		/* An error has occurred, so we should not commit */
		dreq->flags = 0;
		dreq->error = status;
	}

	dreq->count += data->res.count;

	if (data->res.verf->committed != NFS_FILE_SYNC) {
		switch (dreq->flags) {
			case 0:
				memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
				break;
			case NFS_ODIRECT_DO_COMMIT:
				if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
					dprintk("NFS: %5u write verify failed\n", task->tk_pid);
					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
				}
		}
	}
out_unlock:
	spin_unlock(&dreq->lock);
}

/*
 * NB: Return the value of the first error return code.  Subsequent
 * errors after the first one are ignored.
 */
static void nfs_direct_write_release(void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_write_direct_ops = {
	.rpc_call_done = nfs_direct_write_result,
	.rpc_release = nfs_direct_write_release,
};

/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync)
{
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;

	get_dreq(dreq);

	do {
		struct nfs_write_data *data;
		size_t bytes;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(wsize, count);

		result = -ENOMEM;
		data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes));
		if (unlikely(!data))
			break;

		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					data->npages, 0, 0, data->pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (unlikely(result < data->npages)) {
			if (result > 0)
				nfs_direct_release_pages(data->pagevec, result);
			nfs_writedata_release(data);
			break;
		}

		get_dreq(dreq);

		list_move_tail(&data->pages, &dreq->rewrite_list);

		data->req = (struct nfs_page *) dreq;
		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = data->pagevec;
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.count = bytes;
		data->res.verf = &data->verf;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_write_direct_ops, data);
		NFS_PROTO(inode)->write_setup(data, sync);

		data->task.tk_priority = RPC_PRIORITY_NORMAL;
		data->task.tk_cookie = (unsigned long) inode;

		rpc_execute(&data->task);

		dprintk("NFS: %5u initiated direct write call "
			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		started += bytes;
		user_addr += bytes;
		pos += bytes;

		/* FIXME: Remove this useless math from the final patch */
		pgbase += bytes;
		pgbase &= ~PAGE_MASK;
		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));

		count -= bytes;
	} while (count != 0);

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, inode);

	if (started)
		return 0;
	return result < 0 ? (ssize_t) result : -EFAULT;
}
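
/*
 * If no commit buffer could be allocated, or the whole request fits in a
 * single wsize-sized WRITE, the data is sent FLUSH_STABLE up front so
 * that no separate COMMIT is needed.
 */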
static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
{
	ssize_t result = 0;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;
	size_t wsize = NFS_SERVER(inode)->wsize;
	int sync = 0;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return -ENOMEM;
	nfs_alloc_commit_data(dreq);

	if (dreq->commit_data == NULL || count < wsize)
		sync = FLUSH_STABLE;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, count);

	nfs_begin_data_update(inode);

	rpc_clnt_sigmask(clnt, &oldset);
	result = nfs_direct_write_schedule(dreq, user_addr, count, pos, sync);
	if (!result)
		result = nfs_direct_wait(dreq);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	/* XXX: temporary */
	const char __user *buf = iov[0].iov_base;
	size_t count = iov[0].iov_len;

	dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	if (nr_segs != 1)
		return -EINVAL;

	if ((ssize_t) count < 0)
		goto out;
	retval = -EFAULT;
	if (!access_ok(VERIFY_WRITE, buf, count))
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}
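
/*
 * Illustrative user-space sketch (not part of this file's build): an
 * application reaches the entry points above and below by opening the
 * file with O_DIRECT and issuing ordinary read/write calls, typically
 * with aligned buffers.  The path name and sizes here are made up, and
 * error handling and #includes are omitted.
 *
 *	int fd = open("/mnt/nfs/datafile", O_RDWR | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 65536);
 *	pread(fd, buf, 65536, 0);	// serviced by nfs_file_direct_read
 *	pwrite(fd, buf, 65536, 0);	// serviced by nfs_file_direct_write
 */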

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t retval;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	/* XXX: temporary */
	const char __user *buf = iov[0].iov_base;
	size_t count = iov[0].iov_len;

	dprintk("nfs: direct write(%s/%s, %lu@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	if (nr_segs != 1)
		return -EINVAL;

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = -EFAULT;
	if (!access_ok(VERIFY_READ, buf, count))
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);

	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL, NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}