/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 *
 */
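
/*
 * How an application reaches this code: a sketch, not part of the
 * original file.  A file opened with O_DIRECT is serviced by the
 * direct I/O entry points at the bottom of this file.  A minimal
 * user-space example (file name and sizes are illustrative only):
 *
 *	int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *	char *buf = malloc(8192);
 *	ssize_t n = read(fd, buf, 8192);
 *
 * Unlike O_DIRECT on local block-based file systems, this client
 * issues unaligned requests as-is; the pgbase arithmetic below copes
 * with any buffer alignment.
 */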

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#define NFSDBG_FACILITY		NFSDBG_VFS
#define MAX_DIRECTIO_SIZE	(4096UL << PAGE_SHIFT)

static kmem_cache_t *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */
	struct list_head	list;		/* nfs_read_data structs */
	wait_queue_head_t	wait;		/* wait for i/o completion */
	struct page		**pages;	/* pages in our buffer */
	unsigned int		npages;		/* count of pages */
	atomic_t		complete,	/* i/os we're waiting for */
				count,		/* bytes actually processed */
				error;		/* any reported error */
};


/**
 * nfs_get_user_pages - find and set up pages underlying user's buffer
 * @rw: direction (read or write)
 * @user_addr: starting address of this segment of user's buffer
 * @size: size of this segment
 * @pages: returned array of page struct pointers underlying user's buffer
 */
static inline int
nfs_get_user_pages(int rw, unsigned long user_addr, size_t size,
		struct page ***pages)
{
	int result = -ENOMEM;
	unsigned long page_count;
	size_t array_size;

	/* set an arbitrary limit to prevent type overflow */
	/* XXX: this can probably be as large as INT_MAX */
	if (size > MAX_DIRECTIO_SIZE) {
		*pages = NULL;
		return -EFBIG;
	}

	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	page_count -= user_addr >> PAGE_SHIFT;

	array_size = (page_count * sizeof(struct page *));
	*pages = kmalloc(array_size, GFP_KERNEL);
	if (*pages) {
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					page_count, (rw == READ), 0,
					*pages, NULL);
		up_read(&current->mm->mmap_sem);
	}
	return result;
}
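
/*
 * Worked example of the page count arithmetic above (illustrative
 * numbers, assuming 4 KB pages): for user_addr == 0x1100 and
 * size == 8192, the last byte lands at 0x30ff, so
 *
 *	page_count = ((0x1100 + 8192 + 4095) >> 12) - (0x1100 >> 12)
 *		   = 0x4 - 0x1 = 3
 *
 * An unaligned 8 KB buffer therefore pins three pages, not two.
 */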

/**
 * nfs_free_user_pages - tear down page struct array
 * @pages: array of page struct pointers underlying target buffer
 * @npages: number of pages in the array
 * @do_dirty: dirty the pages as we release them
 */
static void
nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
{
	int i;
	for (i = 0; i < npages; i++) {
		struct page *page = pages[i];
		if (do_dirty && !PageCompound(page))
			set_page_dirty_lock(page);
		page_cache_release(page);
	}
	kfree(pages);
}

/**
 * nfs_direct_req_release - release nfs_direct_req structure for direct read
 * @kref: kref object embedded in an nfs_direct_req structure
 *
 */
static void nfs_direct_req_release(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

/**
 * nfs_direct_read_alloc - allocate nfs_read_data structures for direct read
 * @nbytes: count of bytes for the read request
 * @rsize: local rsize setting
 *
 * Note we also set the number of requests we have in the dreq when we are
 * done.  This prevents races with I/O completion so we will always wait
 * until all requests have been dispatched and completed.
 */
static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, unsigned int rsize)
{
	struct list_head *list;
	struct nfs_direct_req *dreq;
	unsigned int reads = 0;
	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	init_waitqueue_head(&dreq->wait);
	INIT_LIST_HEAD(&dreq->list);
	atomic_set(&dreq->count, 0);
	atomic_set(&dreq->error, 0);

	list = &dreq->list;
	for(;;) {
		struct nfs_read_data *data = nfs_readdata_alloc(rpages);

		if (unlikely(!data)) {
			while (!list_empty(list)) {
				data = list_entry(list->next,
						  struct nfs_read_data, pages);
				list_del(&data->pages);
				nfs_readdata_free(data);
			}
			kref_put(&dreq->kref, nfs_direct_req_release);
			return NULL;
		}

		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		reads++;
		if (nbytes <= rsize)
			break;
		nbytes -= rsize;
	}
	kref_get(&dreq->kref);
	atomic_set(&dreq->complete, reads);
	return dreq;
}
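
/*
 * Example of how the allocation loop above carves a request up
 * (illustrative numbers): for nbytes == 81920 and rsize == 32768,
 * three nfs_read_data structs are queued, covering 32768 + 32768 +
 * 16384 bytes, and dreq->complete starts at 3.  The last completion
 * to decrement it to zero wakes the waiter.
 */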

/**
 * nfs_direct_read_result - handle a read reply for a direct read request
 * @data: address of NFS READ operation control block
 * @status: status of this NFS READ operation
 *
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_read_wait (for instance, if someone hits ^C on a slow server).
 */
static void nfs_direct_read_result(struct nfs_read_data *data, int status)
{
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	if (likely(status >= 0))
		atomic_add(data->res.count, &dreq->count);
	else
		atomic_set(&dreq->error, status);

	if (unlikely(atomic_dec_and_test(&dreq->complete))) {
		nfs_free_user_pages(dreq->pages, dreq->npages, 1);
		wake_up(&dreq->wait);
		kref_put(&dreq->kref, nfs_direct_req_release);
	}
}

/**
 * nfs_direct_read_schedule - dispatch NFS READ operations for a direct read
 * @dreq: address of nfs_direct_req struct for this request
 * @inode: target inode
 * @ctx: target file open context
 * @user_addr: starting address of this segment of user's buffer
 * @count: size of this segment
 * @file_offset: offset in file to begin the operation
 *
 * For each nfs_read_data struct that was allocated on the list, dispatch
 * an NFS READ operation
 */
static void nfs_direct_read_schedule(struct nfs_direct_req *dreq,
		struct inode *inode, struct nfs_open_context *ctx,
		unsigned long user_addr, size_t count, loff_t file_offset)
{
	struct list_head *list = &dreq->list;
	struct page **pages = dreq->pages;
	unsigned int curpage, pgbase;
	unsigned int rsize = NFS_SERVER(inode)->rsize;

	curpage = 0;
	pgbase = user_addr & ~PAGE_MASK;
	do {
		struct nfs_read_data *data;
		unsigned int bytes;

		bytes = rsize;
		if (count < rsize)
			bytes = count;

		data = list_entry(list->next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = file_offset;
		data->args.pgbase = pgbase;
		data->args.pages = &pages[curpage];
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.eof = 0;
		data->res.count = bytes;

		NFS_PROTO(inode)->read_setup(data);

		data->task.tk_cookie = (unsigned long) inode;
		data->complete = nfs_direct_read_result;

		lock_kernel();
		rpc_execute(&data->task);
		unlock_kernel();

		dfprintk(VFS, "NFS: %4d initiated direct read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		file_offset += bytes;
		pgbase += bytes;
		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;

		count -= bytes;
	} while (count != 0);
}

/**
 * nfs_direct_read_wait - wait for I/O completion for direct reads
 * @dreq: request on which we are to wait
 * @intr: whether or not this wait can be interrupted
 *
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_read_wait(struct nfs_direct_req *dreq, int intr)
{
	int result = 0;

	if (intr) {
		result = wait_event_interruptible(dreq->wait,
					(atomic_read(&dreq->complete) == 0));
	} else {
		wait_event(dreq->wait, (atomic_read(&dreq->complete) == 0));
	}

	if (!result)
		result = atomic_read(&dreq->error);
	if (!result)
		result = atomic_read(&dreq->count);

	kref_put(&dreq->kref, nfs_direct_req_release);
	return (ssize_t) result;
}
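
/*
 * Reference counting note on the three functions above:
 * nfs_direct_read_alloc takes two references on the dreq (kref_init
 * plus kref_get).  The final RPC completion in nfs_direct_read_result
 * drops one, and nfs_direct_read_wait drops the other, so the dreq
 * survives whichever of "waiter returns" and "last reply arrives"
 * happens second.  The pinned user pages are always released by the
 * last completion, never by the waiter.
 */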

/**
 * nfs_direct_read_seg - Read in one iov segment.  Generate separate
 *			read RPCs for each "rsize" bytes.
 * @inode: target inode
 * @ctx: target file open context
 * @user_addr: starting address of this segment of user's buffer
 * @count: size of this segment
 * @file_offset: offset in file to begin the operation
 * @pages: array of addresses of page structs defining user's buffer
 * @nr_pages: number of pages in the array
 *
 */
static ssize_t nfs_direct_read_seg(struct inode *inode,
		struct nfs_open_context *ctx, unsigned long user_addr,
		size_t count, loff_t file_offset, struct page **pages,
		unsigned int nr_pages)
{
	ssize_t result;
	sigset_t oldset;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
	if (!dreq)
		return -ENOMEM;

	dreq->pages = pages;
	dreq->npages = nr_pages;

	rpc_clnt_sigmask(clnt, &oldset);
	nfs_direct_read_schedule(dreq, inode, ctx, user_addr, count,
				 file_offset);
	result = nfs_direct_read_wait(dreq, clnt->cl_intr);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}

/**
 * nfs_direct_read - For each iov segment, map the user's buffer
 *                   then generate read RPCs.
 * @inode: target inode
 * @ctx: target file open context
 * @iov: array of vectors that define I/O buffer
 * @file_offset: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * We've already pushed out any non-direct writes so that this read
 * will see them when we read from the server.
 */
static ssize_t
nfs_direct_read(struct inode *inode, struct nfs_open_context *ctx,
		const struct iovec *iov, loff_t file_offset,
		unsigned long nr_segs)
{
	ssize_t tot_bytes = 0;
	unsigned long seg = 0;

	while ((seg < nr_segs) && (tot_bytes >= 0)) {
		ssize_t result;
		int page_count;
		struct page **pages;
		const struct iovec *vec = &iov[seg++];
		unsigned long user_addr = (unsigned long) vec->iov_base;
		size_t size = vec->iov_len;

		page_count = nfs_get_user_pages(READ, user_addr, size, &pages);
		if (page_count < 0) {
			nfs_free_user_pages(pages, 0, 0);
			if (tot_bytes > 0)
				break;
			return page_count;
		}

		result = nfs_direct_read_seg(inode, ctx, user_addr, size,
				file_offset, pages, page_count);

		if (result <= 0) {
			if (tot_bytes > 0)
				break;
			return result;
		}
		tot_bytes += result;
		file_offset += result;
		if (result < size)
			break;
	}

	return tot_bytes;
}
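
/*
 * Short-read semantics of the loop above, with illustrative numbers:
 * a readv() with two 4 KB iovecs that gets only 1024 bytes back for
 * the first segment stops immediately and returns 1024, letting the
 * application decide whether to retry.  Later segments are never
 * mapped or issued once an earlier one comes up short.
 */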

/**
 * nfs_direct_write_seg - Write out one iov segment.  Generate separate
 *			write RPCs for each "wsize" bytes, then commit.
 * @inode: target inode
 * @ctx: target file open context
 * @user_addr: starting address of this segment of user's buffer
 * @count: size of this segment
 * @file_offset: offset in file to begin the operation
 * @pages: array of addresses of page structs defining user's buffer
 * @nr_pages: size of pages array
 */
static ssize_t nfs_direct_write_seg(struct inode *inode,
		struct nfs_open_context *ctx, unsigned long user_addr,
		size_t count, loff_t file_offset, struct page **pages,
		int nr_pages)
{
	const unsigned int wsize = NFS_SERVER(inode)->wsize;
	size_t request;
	int curpage, need_commit;
	ssize_t result, tot_bytes;
	struct nfs_writeverf first_verf;
	struct nfs_write_data *wdata;

	wdata = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
	if (!wdata)
		return -ENOMEM;

	wdata->inode = inode;
	wdata->cred = ctx->cred;
	wdata->args.fh = NFS_FH(inode);
	wdata->args.context = ctx;
	wdata->args.stable = NFS_UNSTABLE;
	if (IS_SYNC(inode) || NFS_PROTO(inode)->version == 2 || count <= wsize)
		wdata->args.stable = NFS_FILE_SYNC;
	wdata->res.fattr = &wdata->fattr;
	wdata->res.verf = &wdata->verf;

	nfs_begin_data_update(inode);
retry:
	need_commit = 0;
	tot_bytes = 0;
	curpage = 0;
	request = count;
	wdata->args.pgbase = user_addr & ~PAGE_MASK;
	wdata->args.offset = file_offset;
	do {
		wdata->args.count = request;
		if (wdata->args.count > wsize)
			wdata->args.count = wsize;
		wdata->args.pages = &pages[curpage];

		dprintk("NFS: direct write: c=%u o=%Ld ua=%lu, pb=%u, cp=%u\n",
			wdata->args.count, (long long) wdata->args.offset,
			user_addr + tot_bytes, wdata->args.pgbase, curpage);

		lock_kernel();
		result = NFS_PROTO(inode)->write(wdata);
		unlock_kernel();

		if (result <= 0) {
			if (tot_bytes > 0)
				break;
			goto out;
		}

		if (tot_bytes == 0)
			memcpy(&first_verf.verifier, &wdata->verf.verifier,
				sizeof(first_verf.verifier));
		if (wdata->verf.committed != NFS_FILE_SYNC) {
			need_commit = 1;
			/* all unstable replies must carry the same verifier */
			if (memcmp(&first_verf.verifier, &wdata->verf.verifier,
					sizeof(first_verf.verifier)) != 0)
				goto sync_retry;
		}

		tot_bytes += result;

		/* in case of a short write: stop now, let the app recover */
		if (result < wdata->args.count)
			break;

		wdata->args.offset += result;
		wdata->args.pgbase += result;
		curpage += wdata->args.pgbase >> PAGE_SHIFT;
		wdata->args.pgbase &= ~PAGE_MASK;
		request -= result;
	} while (request != 0);

	/*
	 * Commit data written so far, even in the event of an error
	 */
	if (need_commit) {
		wdata->args.count = tot_bytes;
		wdata->args.offset = file_offset;

		lock_kernel();
		result = NFS_PROTO(inode)->commit(wdata);
		unlock_kernel();

		if (result < 0 || memcmp(&first_verf.verifier,
					 &wdata->verf.verifier,
					 sizeof(first_verf.verifier)) != 0)
			goto sync_retry;
	}
	result = tot_bytes;

out:
	nfs_end_data_update(inode);
	nfs_writedata_free(wdata);
	return result;

sync_retry:
	wdata->args.stable = NFS_FILE_SYNC;
	goto retry;
}
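
/*
 * Why the verifier checks above force a resync: an NFSv3 server may
 * answer UNSTABLE writes from volatile memory, and it returns a write
 * verifier that changes when the server reboots (RFC 1813).  If any
 * WRITE reply, or the final COMMIT reply, carries a different verifier
 * than the first one we saw, buffered data may have been lost on the
 * server, so the safe recovery is to redo the whole segment with
 * stable (NFS_FILE_SYNC) writes.
 */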

/**
 * nfs_direct_write - For each iov segment, map the user's buffer
 *                    then generate write and commit RPCs.
 * @inode: target inode
 * @ctx: target file open context
 * @iov: array of vectors that define I/O buffer
 * @file_offset: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * Upon return, generic_file_direct_IO invalidates any cached pages
 * that non-direct readers might access, so they will pick up these
 * writes immediately.
 */
static ssize_t nfs_direct_write(struct inode *inode,
		struct nfs_open_context *ctx, const struct iovec *iov,
		loff_t file_offset, unsigned long nr_segs)
{
	ssize_t tot_bytes = 0;
	unsigned long seg = 0;

	while ((seg < nr_segs) && (tot_bytes >= 0)) {
		ssize_t result;
		int page_count;
		struct page **pages;
		const struct iovec *vec = &iov[seg++];
		unsigned long user_addr = (unsigned long) vec->iov_base;
		size_t size = vec->iov_len;

		page_count = nfs_get_user_pages(WRITE, user_addr, size, &pages);
		if (page_count < 0) {
			nfs_free_user_pages(pages, 0, 0);
			if (tot_bytes > 0)
				break;
			return page_count;
		}

		result = nfs_direct_write_seg(inode, ctx, user_addr, size,
				file_offset, pages, page_count);
		nfs_free_user_pages(pages, page_count, 0);

		if (result <= 0) {
			if (tot_bytes > 0)
				break;
			return result;
		}
		tot_bytes += result;
		file_offset += result;
		if (result < size)
			break;
	}
	return tot_bytes;
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @file_offset: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 */
ssize_t
nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t file_offset, unsigned long nr_segs)
{
	ssize_t result = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct nfs_open_context *ctx;
	struct dentry *dentry = file->f_dentry;
	struct inode *inode = dentry->d_inode;

	/*
	 * No support for async yet
	 */
	if (!is_sync_kiocb(iocb))
		return result;

	ctx = (struct nfs_open_context *)file->private_data;
	switch (rw) {
	case READ:
		dprintk("NFS: direct_IO(read) (%s) off/no(%Lu/%lu)\n",
				dentry->d_name.name, file_offset, nr_segs);

		result = nfs_direct_read(inode, ctx, iov,
				file_offset, nr_segs);
		break;
	case WRITE:
		dprintk("NFS: direct_IO(write) (%s) off/no(%Lu/%lu)\n",
				dentry->d_name.name, file_offset, nr_segs);

		result = nfs_direct_write(inode, ctx, iov,
				file_offset, nr_segs);
		break;
	default:
		break;
	}
	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer into which to read data
 * @count: number of bytes to read
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  So our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t
nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval = -EINVAL;
	loff_t *ppos = &iocb->ki_pos;
	struct file *file = iocb->ki_filp;
	struct nfs_open_context *ctx =
			(struct nfs_open_context *) file->private_data;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct iovec iov = {
		.iov_base = buf,
		.iov_len = count,
	};

	dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	if (!is_sync_kiocb(iocb))
		goto out;
	/* count is a size_t; reject values above SSIZE_MAX */
	if ((ssize_t) count < 0)
		goto out;
	retval = -EFAULT;
	if (!access_ok(VERIFY_WRITE, iov.iov_base, iov.iov_len))
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	retval = nfs_direct_read(inode, ctx, &iov, pos, 1);
	if (retval > 0)
		*ppos = pos + retval;

out:
	return retval;
}
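
/*
 * Caller note (an assumption about code outside this file):
 * nfs_file_direct_read above and nfs_file_direct_write below are the
 * exported entry points for files opened with O_DIRECT; in this kernel
 * generation the regular NFS file read/write paths are expected to
 * divert to them when the O_DIRECT flag is set, rather than going
 * through the page cache.
 */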

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer from which to write data
 * @count: number of bytes to write
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t
nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval;
	struct file *file = iocb->ki_filp;
	struct nfs_open_context *ctx =
			(struct nfs_open_context *) file->private_data;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct iovec iov = {
		.iov_base = (char __user *)buf,
	};

	dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	retval = -EINVAL;
	if (!is_sync_kiocb(iocb))
		goto out;

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;
	iov.iov_len = count;

	retval = -EFAULT;
	if (!access_ok(VERIFY_READ, iov.iov_base, iov.iov_len))
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	retval = nfs_direct_write(inode, ctx, &iov, pos, 1);
	if (mapping->nrpages)
		invalidate_inode_pages2(mapping);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

int nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, SLAB_RECLAIM_ACCOUNT,
						NULL, NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_directcache(void)
{
	if (kmem_cache_destroy(nfs_direct_cachep))
		printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
}