/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data. Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols. Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache. A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache. The client does not
 * correct unaligned requests from applications. All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files. Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct list_head	rewrite_list;	/* saved nfs_write_data structs */
	struct nfs_write_data *	commit_data;	/* special write_data for commits */
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static const struct rpc_call_ops nfs_write_direct_ops;

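/*
 * Completion is reference counted: nfs_direct_read_schedule_iovec() and
 * nfs_direct_write_schedule_iovec() take one reference for the scheduling
 * path itself, and each dispatched RPC takes another via get_dreq().
 * put_dreq() returns non-zero only when the final reference is dropped,
 * which is the cue to run nfs_direct_complete() or
 * nfs_direct_write_complete().
 */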
static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O. However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			iocb->ki_filp->f_path.dentry->d_name.name,
			(long long) pos, nr_segs);

	return -EINVAL;
}

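/*
 * nfs_direct_dirty_pages() marks the user pages that received read data
 * dirty. The page count arithmetic allows for a buffer that starts part
 * way into its first page: with 4096-byte pages, for example, pgbase = 512
 * and count = 8192 span three pages, since
 * (8192 + 512 + 4095) >> PAGE_SHIFT == 3. Compound pages are left alone.
 */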
static void nfs_direct_dirty_pages(struct page **pages, unsigned int pgbase, size_t count)
{
	unsigned int npages;
	unsigned int i;

	if (count == 0)
		return;
	pages += (pgbase >> PAGE_SHIFT);
	npages = (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		struct page *page = pages[i];
		if (!PageCompound(page))
			set_page_dirty(page);
	}
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		page_cache_release(pages[i]);
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->rewrite_list);
	dreq->iocb = NULL;
	dreq->ctx = NULL;
	spin_lock_init(&dreq->lock);
	atomic_set(&dreq->io_count, 0);
	dreq->count = 0;
	dreq->error = 0;
	dreq->flags = 0;

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb. Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	}
	complete_all(&dreq->completion);

	nfs_direct_req_release(dreq);
}

/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete. This could be long *after* we are woken up in
 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 */
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	if (nfs_readpage_result(task, data) != 0)
		return;

	spin_lock(&dreq->lock);
	if (unlikely(task->tk_status < 0)) {
		dreq->error = task->tk_status;
		spin_unlock(&dreq->lock);
	} else {
		dreq->count += data->res.count;
		spin_unlock(&dreq->lock);
		nfs_direct_dirty_pages(data->pagevec,
				data->args.pgbase,
				data->res.count);
	}
	nfs_direct_release_pages(data->pagevec, data->npages);

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
}

static const struct rpc_call_ops nfs_read_direct_ops = {
	.rpc_call_done = nfs_direct_read_result,
	.rpc_release = nfs_readdata_release,
};

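/*
 * Illustrative example (assuming get_user_pages() pins every page it is
 * asked for): with rsize = 32768, an 80 KB iovec segment is dispatched as
 * three parallel READs of 32768, 32768 and 16384 bytes, each with its own
 * nfs_read_data and its own set of pinned user pages.
 */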
/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation. If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads. Read length accounting is
 * handled automatically by nfs_direct_read_result(). Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
						const struct iovec *iov,
						loff_t pos)
{
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->path.dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t rsize = NFS_SERVER(inode)->rsize;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = &nfs_read_direct_ops,
		.flags = RPC_TASK_ASYNC,
	};
	unsigned int pgbase;
	int result;
	ssize_t started = 0;

	do {
		struct nfs_read_data *data;
		size_t bytes;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(rsize, count);

		result = -ENOMEM;
		data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes));
		if (unlikely(!data))
			break;

		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					data->npages, 1, 0, data->pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (result < 0) {
			nfs_readdata_release(data);
			break;
		}
		if ((unsigned)result < data->npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(data->pagevec, result);
				nfs_readdata_release(data);
				break;
			}
			bytes -= pgbase;
			data->npages = result;
		}

		get_dreq(dreq);

		data->req = (struct nfs_page *) dreq;
		data->inode = inode;
		data->cred = msg.rpc_cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = data->pagevec;
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.eof = 0;
		data->res.count = bytes;
		msg.rpc_argp = &data->args;
		msg.rpc_resp = &data->res;

		task_setup_data.task = &data->task;
		task_setup_data.callback_data = data;
		NFS_PROTO(inode)->read_setup(data, &msg);

		task = rpc_run_task(&task_setup_data);
		if (!IS_ERR(task))
			rpc_put_task(task);

		dprintk("NFS: %5u initiated direct read call "
			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		started += bytes;
		user_addr += bytes;
		pos += bytes;

		/* FIXME: Remove this unnecessary math from final patch */
		pgbase += bytes;
		pgbase &= ~PAGE_MASK;
		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));

		count -= bytes;
	} while (count != 0);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      const struct iovec *iov,
					      unsigned long nr_segs,
					      loff_t pos)
{
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	unsigned long seg;

	get_dreq(dreq);

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_read_schedule_segment(dreq, vec, pos);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);

	if (requested_bytes != 0)
		return 0;

	if (result < 0)
		return result;
	return -EIO;
}

static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos)
{
	ssize_t result = 0;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return -ENOMEM;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
	if (!result)
		result = nfs_direct_wait(dreq);
	nfs_direct_req_release(dreq);

	return result;
}

static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
{
	while (!list_empty(&dreq->rewrite_list)) {
		struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_direct_release_pages(data->pagevec, data->npages);
		nfs_writedata_release(data);
	}
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct inode *inode = dreq->inode;
	struct list_head *p;
	struct nfs_write_data *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_cred = dreq->ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.callback_ops = &nfs_write_direct_ops,
		.flags = RPC_TASK_ASYNC,
	};

	dreq->count = 0;
	get_dreq(dreq);

	list_for_each(p, &dreq->rewrite_list) {
		data = list_entry(p, struct nfs_write_data, pages);

		get_dreq(dreq);

		/* Use stable writes */
		data->args.stable = NFS_FILE_SYNC;

		/*
		 * Reset data->res.
		 */
		nfs_fattr_init(&data->fattr);
		data->res.count = data->args.count;
		memset(&data->verf, 0, sizeof(data->verf));

		/*
		 * Reuse data->task; data->args should not have changed
		 * since the original request was sent.
		 */
		task_setup_data.task = &data->task;
		task_setup_data.callback_data = data;
		msg.rpc_argp = &data->args;
		msg.rpc_resp = &data->res;
		NFS_PROTO(inode)->write_setup(data, &msg);

		/*
		 * We're called via an RPC callback, so BKL is already held.
		 */
		task = rpc_run_task(&task_setup_data);
		if (!IS_ERR(task))
			rpc_put_task(task);

		dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				data->args.count,
				(unsigned long long)data->args.offset);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, inode);
}

static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;
	if (unlikely(task->tk_status < 0)) {
		dprintk("NFS: %5u commit failed with error %d.\n",
				task->tk_pid, task->tk_status);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		dprintk("NFS: %5u commit verify failed\n", task->tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", task->tk_pid, task->tk_status);
	nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_commit_direct_ops = {
	.rpc_call_done = nfs_direct_commit_result,
	.rpc_release = nfs_commit_release,
};

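/*
 * Send a single COMMIT that covers the whole file (offset 0, count 0).
 * nfs_direct_commit_result() compares the verifier the server returns
 * with the one saved from the UNSTABLE writes; a mismatch means the
 * server may have rebooted and lost the unstable data, so the writes
 * are rescheduled as NFS_FILE_SYNC by nfs_direct_write_reschedule().
 */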
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	struct nfs_write_data *data = dreq->commit_data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = dreq->ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(dreq->inode),
		.rpc_message = &msg,
		.callback_ops = &nfs_commit_direct_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};

	data->inode = dreq->inode;
	data->cred = msg.rpc_cred;

	data->args.fh = NFS_FH(data->inode);
	data->args.offset = 0;
	data->args.count = 0;
	data->res.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;

	NFS_PROTO(data->inode)->commit_setup(data, &msg);

	/* Note: task.tk_ops->rpc_release will free dreq->commit_data */
	dreq->commit_data = NULL;

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

	task = rpc_run_task(&task_setup_data);
	if (!IS_ERR(task))
		rpc_put_task(task);
}

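/*
 * Runs once the last outstanding write (or the commit) has finished.
 * NFS_ODIRECT_DO_COMMIT sends the deferred COMMIT, and
 * NFS_ODIRECT_RESCHED_WRITES resends every saved write as NFS_FILE_SYNC;
 * both leave the request outstanding until the new RPCs complete.
 * Otherwise the saved write data is freed and the inode's page cache is
 * invalidated so ordinary cached readers fetch the new data from the
 * server.
 */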
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
	case NFS_ODIRECT_DO_COMMIT:
		nfs_direct_commit_schedule(dreq);
		break;
	case NFS_ODIRECT_RESCHED_WRITES:
		nfs_direct_write_reschedule(dreq);
		break;
	default:
		if (dreq->commit_data != NULL)
			nfs_commit_free(dreq->commit_data);
		nfs_direct_free_writedata(dreq);
		nfs_zap_mapping(inode, inode->i_mapping);
		nfs_direct_complete(dreq);
	}
}

static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
	dreq->commit_data = nfs_commit_alloc();
	if (dreq->commit_data != NULL)
		dreq->commit_data->req = (struct nfs_page *) dreq;
}
#else
static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
	dreq->commit_data = NULL;
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	nfs_direct_free_writedata(dreq);
	nfs_zap_mapping(inode, inode->i_mapping);
	nfs_direct_complete(dreq);
}
#endif

static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
	int status = task->tk_status;

	if (nfs_writeback_done(task, data) != 0)
		return;

	spin_lock(&dreq->lock);

	if (unlikely(status < 0)) {
		/* An error has occurred, so we should not commit */
		dreq->flags = 0;
		dreq->error = status;
	}
	if (unlikely(dreq->error != 0))
		goto out_unlock;

	dreq->count += data->res.count;

	if (data->res.verf->committed != NFS_FILE_SYNC) {
		switch (dreq->flags) {
		case 0:
			memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
			dreq->flags = NFS_ODIRECT_DO_COMMIT;
			break;
		case NFS_ODIRECT_DO_COMMIT:
			if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
				dprintk("NFS: %5u write verify failed\n", task->tk_pid);
				dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			}
		}
	}
out_unlock:
	spin_unlock(&dreq->lock);
}

/*
 * NB: Return the value of the first error return code. Subsequent
 *     errors after the first one are ignored.
 */
static void nfs_direct_write_release(void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_write_direct_ops = {
	.rpc_call_done = nfs_direct_write_result,
	.rpc_release = nfs_direct_write_release,
};

/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation. If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes. Write length accounting is
 * handled automatically by nfs_direct_write_result(). Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
						 const struct iovec *iov,
						 loff_t pos, int sync)
{
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->path.dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = &nfs_write_direct_ops,
		.flags = RPC_TASK_ASYNC,
	};
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;

	do {
		struct nfs_write_data *data;
		size_t bytes;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(wsize, count);

		result = -ENOMEM;
		data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes));
		if (unlikely(!data))
			break;

		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					data->npages, 0, 0, data->pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (result < 0) {
			nfs_writedata_release(data);
			break;
		}
		if ((unsigned)result < data->npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(data->pagevec, result);
				nfs_writedata_release(data);
				break;
			}
			bytes -= pgbase;
			data->npages = result;
		}

		get_dreq(dreq);

		list_move_tail(&data->pages, &dreq->rewrite_list);

		data->req = (struct nfs_page *) dreq;
		data->inode = inode;
		data->cred = msg.rpc_cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = pos;
		data->args.pgbase = pgbase;
		data->args.pages = data->pagevec;
		data->args.count = bytes;
		data->args.stable = sync;
		data->res.fattr = &data->fattr;
		data->res.count = bytes;
		data->res.verf = &data->verf;

		task_setup_data.task = &data->task;
		task_setup_data.callback_data = data;
		msg.rpc_argp = &data->args;
		msg.rpc_resp = &data->res;
		NFS_PROTO(inode)->write_setup(data, &msg);

		task = rpc_run_task(&task_setup_data);
		if (!IS_ERR(task))
			rpc_put_task(task);

		dprintk("NFS: %5u initiated direct write call "
			"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		started += bytes;
		user_addr += bytes;
		pos += bytes;

		/* FIXME: Remove this useless math from the final patch */
		pgbase += bytes;
		pgbase &= ~PAGE_MASK;
		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));

		count -= bytes;
	} while (count != 0);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       const struct iovec *iov,
					       unsigned long nr_segs,
					       loff_t pos, int sync)
{
	ssize_t result = 0;
	size_t requested_bytes = 0;
	unsigned long seg;

	get_dreq(dreq);

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_write_schedule_segment(dreq, vec,
							   pos, sync);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);

	if (requested_bytes != 0)
		return 0;

	if (result < 0)
		return result;
	return -EIO;
}

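/*
 * Pick how the writes go out on the wire: if the request is smaller than
 * a single wsize'd WRITE, or no commit_data could be allocated, use
 * NFS_FILE_SYNC so no separate COMMIT is needed; otherwise send UNSTABLE
 * writes and commit them once they have all completed.
 */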
static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos,
				size_t count)
{
	ssize_t result = 0;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;
	size_t wsize = NFS_SERVER(inode)->wsize;
	int sync = NFS_UNSTABLE;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		return -ENOMEM;
	nfs_alloc_commit_data(dreq);

	if (dreq->commit_data == NULL || count < wsize)
		sync = NFS_FILE_SYNC;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
	if (!result)
		result = nfs_direct_wait(dreq);
	nfs_direct_req_release(dreq);

	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file. For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change. Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dprintk("nfs: direct read(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	retval = nfs_direct_read(iocb, iov, nr_segs, pos);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size. The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache. We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	dfprintk(VFS, "nfs: direct write(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);

	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}