/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files, network, direct splicing, etc and
 * fixing lots of bugs.
 *
 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
 *
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
#include <linux/security.h>
#include <linux/gfp.h>
#include <linux/socket.h>
#include <linux/compat.h>
#include "internal.h"
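
/*
 * Illustrative userspace sketch (not part of this file) of the
 * pipe-as-buffer model described above: move data from a file into a
 * pipe, then on to a socket, without a userspace copy. 'in_fd' and
 * 'sock_fd' are assumed to be an open file and a connected socket.
 *
 *	int pfd[2];
 *	ssize_t n;
 *
 *	pipe(pfd);
 *	n = splice(in_fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MORE);
 *	if (n > 0)
 *		splice(pfd[0], NULL, sock_fd, NULL, n, 0);
 */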

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping;

	lock_page(page);

	mapping = page_mapping(page);
	if (mapping) {
		WARN_ON(!PageUptodate(page));

		/*
		 * At least for ext2 with nobh option, we need to wait on
		 * writeback completing on this page, since we'll remove it
		 * from the pagecache. Otherwise truncate won't wait on the
		 * page, allowing the disk blocks to be reused by someone else
		 * before we actually wrote our data to them. fs corruption
		 * ensues.
		 */
		wait_on_page_writeback(page);

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL))
			goto out_unlock;

		/*
		 * If we succeeded in removing the mapping, set LRU flag
		 * and return good.
		 */
		if (remove_mapping(mapping, page)) {
			buf->flags |= PIPE_BUF_FLAG_LRU;
			return 0;
		}
	}

	/*
	 * Raced with truncate or failed to remove page from current
	 * address space, unlock and return failure.
	 */
out_unlock:
	unlock_page(page);
	return 1;
}

static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
}

/*
 * Check whether the contents of buf are OK to access. Since the content
 * is a page cache page, IO may be in flight.
 */
static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
				       struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok after all, we are done.
		 */
		unlock_page(page);
	}

	return 0;
error:
	unlock_page(page);
	return err;
}

const struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = page_cache_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return generic_pipe_buf_steal(pipe, buf);
}

static const struct pipe_buf_operations user_page_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static void wakeup_pipe_readers(struct pipe_inode_info *pipe)
{
	smp_mb();
	if (waitqueue_active(&pipe->wait))
		wake_up_interruptible(&pipe->wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}

/**
 * splice_to_pipe - fill passed data into a pipe
 * @pipe: pipe to fill
 * @spd: data to fill
 *
 * Description:
 *    @spd contains a map of pages and len/offset tuples, along with
 *    the struct pipe_buf_operations associated with these pages. This
 *    function will link that data to the pipe.
 *
 */
ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
		       struct splice_pipe_desc *spd)
{
	unsigned int spd_pages = spd->nr_pages;
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	pipe_lock(pipe);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->private = spd->partial[page_nr].private;
			buf->ops = spd->ops;
			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;

			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->files)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < pipe->buffers)
				continue;

			break;
		}

		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	pipe_unlock(pipe);

	if (do_wakeup)
		wakeup_pipe_readers(pipe);

	while (page_nr < spd_pages)
		spd->spd_release(spd, page_nr++);

	return ret;
}
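
/*
 * A minimal caller sketch for splice_to_pipe() (illustrative only):
 * link one page, for which the caller already holds a reference, into
 * @pipe. The reference is consumed by ->release() when the buffer is
 * drained, or by spd_release_page() if the page never made it in.
 *
 *	struct page *pages[1] = { page };
 *	struct partial_page partial[1] = {
 *		{ .offset = 0, .len = PAGE_CACHE_SIZE },
 *	};
 *	struct splice_pipe_desc spd = {
 *		.pages		= pages,
 *		.partial	= partial,
 *		.nr_pages	= 1,
 *		.nr_pages_max	= 1,
 *		.ops		= &default_pipe_buf_ops,
 *		.spd_release	= spd_release_page,
 *	};
 *
 *	ret = splice_to_pipe(pipe, &spd);
 */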

void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
{
	page_cache_release(spd->pages[i]);
}

/*
 * Check if we need to grow the arrays holding pages and partial page
 * descriptions.
 */
int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
{
	unsigned int buffers = ACCESS_ONCE(pipe->buffers);

	spd->nr_pages_max = buffers;
	if (buffers <= PIPE_DEF_BUFFERS)
		return 0;

	spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
	spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);

	if (spd->pages && spd->partial)
		return 0;

	kfree(spd->pages);
	kfree(spd->partial);
	return -ENOMEM;
}

void splice_shrink_spd(struct splice_pipe_desc *spd)
{
	if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
		return;

	kfree(spd->pages);
	kfree(spd->partial);
}
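
/*
 * The expected calling pattern for the two helpers above, mirroring
 * __generic_file_splice_read() below: reserve stack arrays for the
 * common case, and let splice_grow_spd() kmalloc bigger ones only when
 * the pipe was resized past PIPE_DEF_BUFFERS.
 *
 *	struct page *pages[PIPE_DEF_BUFFERS];
 *	struct partial_page partial[PIPE_DEF_BUFFERS];
 *	struct splice_pipe_desc spd = {
 *		.pages		= pages,
 *		.partial	= partial,
 *		.nr_pages_max	= PIPE_DEF_BUFFERS,
 *		...
 *	};
 *
 *	if (splice_grow_spd(pipe, &spd))
 *		return -ENOMEM;
 *	...
 *	splice_shrink_spd(&spd);
 */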

static int
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, spd.nr_pages_max);

	/*
	 * Lookup the (hopefully) full range of pages we need.
	 */
	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages);
	index += spd.nr_pages;

	/*
	 * If find_get_pages_contig() returned fewer pages than we needed,
	 * readahead/allocate the rest and fill in the holes.
	 */
	if (spd.nr_pages < nr_pages)
		page_cache_sync_readahead(mapping, &in->f_ra, in,
				index, req_pages - spd.nr_pages);

	error = 0;
	while (spd.nr_pages < nr_pages) {
		/*
		 * Page could be there, find_get_pages_contig() breaks on
		 * the first hole.
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * page didn't exist, allocate one.
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						GFP_KERNEL);
			if (unlikely(error)) {
				page_cache_release(page);
				if (error == -EEXIST)
					continue;
				break;
			}
			/*
			 * add_to_page_cache() locks the page, unlock it
			 * to avoid convoluting the logic below even more.
			 */
			unlock_page(page);
		}

		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	/*
	 * Now loop over the map and see if we need to start IO on any
	 * pages, fill in the partial map, etc.
	 */
	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;
	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (PageReadahead(page))
			page_cache_async_readahead(mapping, &in->f_ra, in,
					page, index, req_pages - page_nr);

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			lock_page(page);

			/*
			 * Page was truncated, or invalidated by the
			 * filesystem. Redo the find/create, but this time the
			 * page is kept locked, so there's no chance of another
			 * race with truncate/invalidate.
			 */
			if (!page->mapping) {
				unlock_page(page);
				page = find_or_create_page(mapping, index,
						mapping_gfp_mask(mapping));

				if (!page) {
					error = -ENOMEM;
					break;
				}
				page_cache_release(spd.pages[page_nr]);
				spd.pages[page_nr] = page;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);
			if (unlikely(error)) {
				/*
				 * We really should re-lookup the page here,
				 * but it complicates things a lot. Instead
				 * let's just do what we already stored, and
				 * we'll get it the next time we are called.
				 */
				if (error == AOP_TRUNCATED_PAGE)
					error = 0;

				break;
			}
		}
fill_it:
		/*
		 * i_size must be checked after PageUptodate.
		 */
		isize = i_size_read(mapping->host);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		/*
		 * if this is the last page, see if we need to shrink
		 * the length and stop
		 */
		if (end_index == index) {
			unsigned int plen;

			/*
			 * max good bytes in this page
			 */
			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			/*
			 * force quit after adding this page
			 */
			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	/*
	 * Release any pages at the end, if we quit early. 'page_nr' is how far
	 * we got, 'nr_pages' is how many pages are in the map.
	 */
	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);
	in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);
	return error;
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in: file to splice from
 * @ppos: position in @in
 * @pipe: pipe to splice to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Description:
 *    Will read pages from given file and fill them into a pipe. Can be
 *    used as long as the address_space operations for the source implement
 *    a readpage() hook.
 *
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	loff_t isize, left;
	int ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
	if (ret > 0) {
		*ppos += ret;
		file_accessed(in);
	}

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_read);
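
/*
 * Filesystems normally publish this helper through their
 * file_operations; a sketch, with a hypothetical fs name:
 *
 *	const struct file_operations examplefs_file_operations = {
 *		...
 *		.splice_read	= generic_file_splice_read,
 *	};
 */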

static const struct pipe_buf_operations default_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	return 1;
}

/* Pipe buffer operations for a socket and similar. */
const struct pipe_buf_operations nosteal_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = generic_pipe_buf_nosteal,
	.get = generic_pipe_buf_get,
};
EXPORT_SYMBOL(nosteal_pipe_buf_ops);

static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
			    unsigned long vlen, loff_t offset)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	ssize_t res;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
	set_fs(old_fs);

	return res;
}

ssize_t kernel_write(struct file *file, const char *buf, size_t count,
		     loff_t pos)
{
	mm_segment_t old_fs;
	ssize_t res;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	res = vfs_write(file, (__force const char __user *)buf, count, &pos);
	set_fs(old_fs);

	return res;
}
EXPORT_SYMBOL(kernel_write);

ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	unsigned int nr_pages;
	unsigned int nr_freed;
	size_t offset;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct iovec *vec, __vec[PIPE_DEF_BUFFERS];
	ssize_t res;
	size_t this_len;
	int error;
	int i;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &default_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	res = -ENOMEM;
	vec = __vec;
	if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
		vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
		if (!vec)
			goto shrink_ret;
	}

	offset = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
		struct page *page;

		page = alloc_page(GFP_USER);
		error = -ENOMEM;
		if (!page)
			goto err;

		this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
		vec[i].iov_base = (void __user *) page_address(page);
		vec[i].iov_len = this_len;
		spd.pages[i] = page;
		spd.nr_pages++;
		len -= this_len;
		offset = 0;
	}

	res = kernel_readv(in, vec, spd.nr_pages, *ppos);
	if (res < 0) {
		error = res;
		goto err;
	}

	error = 0;
	if (!res)
		goto err;

	nr_freed = 0;
	for (i = 0; i < spd.nr_pages; i++) {
		this_len = min_t(size_t, vec[i].iov_len, res);
		spd.partial[i].offset = 0;
		spd.partial[i].len = this_len;
		if (!this_len) {
			__free_page(spd.pages[i]);
			spd.pages[i] = NULL;
			nr_freed++;
		}
		res -= this_len;
	}
	spd.nr_pages -= nr_freed;

	res = splice_to_pipe(pipe, &spd);
	if (res > 0)
		*ppos += res;

shrink_ret:
	if (vec != __vec)
		kfree(vec);
	splice_shrink_spd(&spd);
	return res;

err:
	for (i = 0; i < spd.nr_pages; i++)
		__free_page(spd.pages[i]);

	res = error;
	goto shrink_ret;
}
EXPORT_SYMBOL(default_file_splice_read);

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	loff_t pos = sd->pos;
	int more;

	if (!likely(file->f_op->sendpage))
		return -EINVAL;

	more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;

	if (sd->len < sd->total_len && pipe->nrbufs > 1)
		more |= MSG_SENDPAGE_NOTLAST;

	return file->f_op->sendpage(file, buf->page, buf->offset,
				    sd->len, &pos, more);
}
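
/*
 * The usual ->sendpage() target here is a socket; net/socket.c wires
 * socket files up roughly as below, so splicing pipe -> socket hands
 * page references straight to the protocol without copying the data:
 *
 *	static const struct file_operations socket_file_ops = {
 *		...
 *		.sendpage	= sock_sendpage,
 *	};
 */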

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		 struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	struct address_space *mapping = file->f_mapping;
	unsigned int offset, this_len;
	struct page *page;
	void *fsdata;
	int ret;

	offset = sd->pos & ~PAGE_CACHE_MASK;

	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;

	ret = pagecache_write_begin(file, mapping, sd->pos, this_len,
				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (unlikely(ret))
		goto out;

	if (buf->page != page) {
		char *src = buf->ops->map(pipe, buf, 1);
		char *dst = kmap_atomic(page);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst);
		buf->ops->unmap(pipe, buf, src);
	}
	ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
				page, fsdata);
out:
	return ret;
}
EXPORT_SYMBOL(pipe_to_file);

static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
{
	smp_mb();
	if (waitqueue_active(&pipe->wait))
		wake_up_interruptible(&pipe->wait);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}

/**
 * splice_from_pipe_feed - feed available data from a pipe to a file
 * @pipe: pipe to splice from
 * @sd: information to @actor
 * @actor: handler that splices the data
 *
 * Description:
 *    This function loops over the pipe and calls @actor to do the
 *    actual moving of a single struct pipe_buffer to the desired
 *    destination. It returns when there are no more buffers left in
 *    the pipe or if the requested number of bytes (@sd->total_len)
 *    have been copied. It returns a positive number (one) if the
 *    pipe needs to be filled with more data, zero if the required
 *    number of bytes have been copied and -errno on error.
 *
 *    This, together with splice_from_pipe_{begin,end,next}, may be
 *    used to implement the functionality of __splice_from_pipe() when
 *    locking is required around copying the pipe buffers to the
 *    destination.
 */
int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
			  splice_actor *actor)
{
	int ret;

	while (pipe->nrbufs) {
		struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
		const struct pipe_buf_operations *ops = buf->ops;

		sd->len = buf->len;
		if (sd->len > sd->total_len)
			sd->len = sd->total_len;

		ret = buf->ops->confirm(pipe, buf);
		if (unlikely(ret)) {
			if (ret == -ENODATA)
				ret = 0;
			return ret;
		}

		ret = actor(pipe, buf, sd);
		if (ret <= 0)
			return ret;

		buf->offset += ret;
		buf->len -= ret;

		sd->num_spliced += ret;
		sd->len -= ret;
		sd->pos += ret;
		sd->total_len -= ret;

		if (!buf->len) {
			buf->ops = NULL;
			ops->release(pipe, buf);
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
			if (pipe->files)
				sd->need_wakeup = true;
		}

		if (!sd->total_len)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL(splice_from_pipe_feed);

/**
 * splice_from_pipe_next - wait for some data to splice from
 * @pipe: pipe to splice from
 * @sd: information about the splice operation
 *
 * Description:
 *    This function will wait for some data and return a positive
 *    value (one) if pipe buffers are available. It will return zero
 *    or -errno if no more data needs to be spliced.
 */
int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	while (!pipe->nrbufs) {
		if (!pipe->writers)
			return 0;

		if (!pipe->waiting_writers && sd->num_spliced)
			return 0;

		if (sd->flags & SPLICE_F_NONBLOCK)
			return -EAGAIN;

		if (signal_pending(current))
			return -ERESTARTSYS;

		if (sd->need_wakeup) {
			wakeup_pipe_writers(pipe);
			sd->need_wakeup = false;
		}

		pipe_wait(pipe);
	}

	return 1;
}
EXPORT_SYMBOL(splice_from_pipe_next);

/**
 * splice_from_pipe_begin - start splicing from pipe
 * @sd: information about the splice operation
 *
 * Description:
 *    This function should be called before a loop containing
 *    splice_from_pipe_next() and splice_from_pipe_feed() to
 *    initialize the necessary fields of @sd.
 */
void splice_from_pipe_begin(struct splice_desc *sd)
{
	sd->num_spliced = 0;
	sd->need_wakeup = false;
}
EXPORT_SYMBOL(splice_from_pipe_begin);

/**
 * splice_from_pipe_end - finish splicing from pipe
 * @pipe: pipe to splice from
 * @sd: information about the splice operation
 *
 * Description:
 *    This function will wake up pipe writers if necessary. It should
 *    be called after a loop containing splice_from_pipe_next() and
 *    splice_from_pipe_feed().
 */
void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	if (sd->need_wakeup)
		wakeup_pipe_writers(pipe);
}
EXPORT_SYMBOL(splice_from_pipe_end);
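
/*
 * Taken together, the begin/next/feed/end helpers above are meant to
 * be used in this canonical loop (this is exactly what
 * __splice_from_pipe() below does, and what generic_file_splice_write()
 * open-codes so it can take the inode mutex around the feed step):
 *
 *	splice_from_pipe_begin(sd);
 *	do {
 *		ret = splice_from_pipe_next(pipe, sd);
 *		if (ret > 0)
 *			ret = splice_from_pipe_feed(pipe, sd, actor);
 *	} while (ret > 0);
 *	splice_from_pipe_end(pipe, sd);
 */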

/**
 * __splice_from_pipe - splice data from a pipe to given actor
 * @pipe: pipe to splice from
 * @sd: information to @actor
 * @actor: handler that splices the data
 *
 * Description:
 *    This function does little more than loop over the pipe and call
 *    @actor to do the actual moving of a single struct pipe_buffer to
 *    the desired destination. See pipe_to_file, pipe_to_sendpage, or
 *    pipe_to_user.
 *
 */
ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
			   splice_actor *actor)
{
	int ret;

	splice_from_pipe_begin(sd);
	do {
		ret = splice_from_pipe_next(pipe, sd);
		if (ret > 0)
			ret = splice_from_pipe_feed(pipe, sd, actor);
	} while (ret > 0);
	splice_from_pipe_end(pipe, sd);

	return sd->num_spliced ? sd->num_spliced : ret;
}
EXPORT_SYMBOL(__splice_from_pipe);

/**
 * splice_from_pipe - splice data from a pipe to a file
 * @pipe: pipe to splice from
 * @out: file to splice to
 * @ppos: position in @out
 * @len: how many bytes to splice
 * @flags: splice modifier flags
 * @actor: handler that splices the data
 *
 * Description:
 *    See __splice_from_pipe. This function locks the pipe inode,
 *    otherwise it's identical to __splice_from_pipe().
 *
 */
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	ssize_t ret;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};

	pipe_lock(pipe);
	ret = __splice_from_pipe(pipe, &sd, actor);
	pipe_unlock(pipe);

	return ret;
}

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe: pipe info
 * @out: file to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Description:
 *    Will either move or copy pages (determined by @flags options) from
 *    the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};
	ssize_t ret;

	pipe_lock(pipe);

	splice_from_pipe_begin(&sd);
	do {
		ret = splice_from_pipe_next(pipe, &sd);
		if (ret <= 0)
			break;

		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
		ret = file_remove_suid(out);
		if (!ret) {
			ret = file_update_time(out);
			if (!ret)
				ret = splice_from_pipe_feed(pipe, &sd,
							    pipe_to_file);
		}
		mutex_unlock(&inode->i_mutex);
	} while (ret > 0);
	splice_from_pipe_end(pipe, &sd);

	pipe_unlock(pipe);

	if (sd.num_spliced)
		ret = sd.num_spliced;

	if (ret > 0) {
		int err;

		err = generic_write_sync(out, *ppos, ret);
		if (err)
			ret = err;
		else
			*ppos += ret;
		balance_dirty_pages_ratelimited(mapping);
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);

static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			  struct splice_desc *sd)
{
	int ret;
	void *data;
	loff_t tmp = sd->pos;

	data = buf->ops->map(pipe, buf, 0);
	ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
	buf->ops->unmap(pipe, buf, data);

	return ret;
}

static ssize_t default_file_splice_write(struct pipe_inode_info *pipe,
					 struct file *out, loff_t *ppos,
					 size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf);
	if (ret > 0)
		*ppos += ret;

	return ret;
}

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe: pipe to splice from
 * @out: socket to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Description:
 *    Will send @len bytes from the pipe to a network socket. No data copying
 *    is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
				loff_t *, size_t, unsigned int);

	if (out->f_op->splice_write)
		splice_write = out->f_op->splice_write;
	else
		splice_write = default_file_splice_write;

	return splice_write(pipe, out, ppos, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	ssize_t (*splice_read)(struct file *, loff_t *,
			       struct pipe_inode_info *, size_t, unsigned int);
	int ret;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	if (in->f_op->splice_read)
		splice_read = in->f_op->splice_read;
	else
		splice_read = default_file_splice_read;

	return splice_read(in, ppos, pipe, len, flags);
}

/**
 * splice_direct_to_actor - splices data directly between two non-pipes
 * @in: file to splice from
 * @sd: actor information on where to splice to
 * @actor: handles the data splicing
 *
 * Description:
 *    This is a special case helper to splice directly between two
 *    points, without requiring an explicit pipe. Internally an allocated
 *    pipe is cached in the process, and reused during the lifetime of
 *    that process.
 *
 */
ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
			       splice_direct_actor *actor)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	umode_t i_mode;
	size_t len;
	int i, flags;

	/*
	 * We require the input to be a regular file, as we don't want to
	 * randomly drop data for e.g. socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = file_inode(in)->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;
	len = sd->total_len;
	flags = sd->flags;

	/*
	 * Don't block on output, we have to drain the direct pipe.
	 */
	sd->flags &= ~SPLICE_F_NONBLOCK;

	while (len) {
		size_t read_len;
		loff_t pos = sd->pos, prev_pos = pos;

		ret = do_splice_to(in, &pos, pipe, len, flags);
		if (unlikely(ret <= 0))
			goto out_release;

		read_len = ret;
		sd->total_len = read_len;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = actor(pipe, sd);
		if (unlikely(ret <= 0)) {
			sd->pos = prev_pos;
			goto out_release;
		}

		bytes += ret;
		len -= ret;
		sd->pos = pos;

		if (ret < read_len) {
			sd->pos = prev_pos + ret;
			goto out_release;
		}
	}

done:
	pipe->nrbufs = pipe->curbuf = 0;
	file_accessed(in);
	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}

	if (!bytes)
		bytes = ret;

	goto done;
}
EXPORT_SYMBOL(splice_direct_to_actor);

static int direct_splice_actor(struct pipe_inode_info *pipe,
			       struct splice_desc *sd)
{
	struct file *file = sd->u.file;

	return do_splice_from(pipe, file, sd->opos, sd->total_len,
			      sd->flags);
}

/**
 * do_splice_direct - splices data directly between two files
 * @in: file to splice from
 * @ppos: input file offset
 * @out: file to splice to
 * @opos: output file offset
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Description:
 *    For use by do_sendfile(). splice can easily emulate sendfile, but
 *    doing it in the application would incur an extra system call
 *    (splice in + splice out, as compared to just sendfile()). So this helper
 *    can splice directly through a process-private pipe.
 *
 */
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      loff_t *opos, size_t len, unsigned int flags)
{
	struct splice_desc sd = {
		.len		= len,
		.total_len	= len,
		.flags		= flags,
		.pos		= *ppos,
		.u.file		= out,
		.opos		= opos,
	};
	long ret;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	if (unlikely(out->f_flags & O_APPEND))
		return -EINVAL;

	ret = rw_verify_area(WRITE, out, opos, len);
	if (unlikely(ret < 0))
		return ret;

	ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
	if (ret > 0)
		*ppos = sd.pos;

	return ret;
}
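
/*
 * Userspace view (illustrative): because do_sendfile() calls
 * do_splice_direct(), a plain
 *
 *	sendfile(sock_fd, file_fd, NULL, count);
 *
 * behaves like splicing file_fd through the task's private pipe and
 * out to sock_fd, with no intermediate userspace buffer.
 */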

static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
			       struct pipe_inode_info *opipe,
			       size_t len, unsigned int flags);

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *ipipe;
	struct pipe_inode_info *opipe;
	loff_t offset;
	long ret;

	ipipe = get_pipe_info(in);
	opipe = get_pipe_info(out);

	if (ipipe && opipe) {
		if (off_in || off_out)
			return -ESPIPE;

		if (!(in->f_mode & FMODE_READ))
			return -EBADF;

		if (!(out->f_mode & FMODE_WRITE))
			return -EBADF;

		/* Splicing to self would be fun, but... */
		if (ipipe == opipe)
			return -EINVAL;

		return splice_pipe_to_pipe(ipipe, opipe, len, flags);
	}

	if (ipipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (!(out->f_mode & FMODE_PWRITE))
				return -EINVAL;
			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
				return -EFAULT;
		} else {
			offset = out->f_pos;
		}

		if (unlikely(!(out->f_mode & FMODE_WRITE)))
			return -EBADF;

		if (unlikely(out->f_flags & O_APPEND))
			return -EINVAL;

		ret = rw_verify_area(WRITE, out, &offset, len);
		if (unlikely(ret < 0))
			return ret;

		file_start_write(out);
		ret = do_splice_from(ipipe, out, &offset, len, flags);
		file_end_write(out);

		if (!off_out)
			out->f_pos = offset;
		else if (copy_to_user(off_out, &offset, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	if (opipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (!(in->f_mode & FMODE_PREAD))
				return -EINVAL;
			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
				return -EFAULT;
		} else {
			offset = in->f_pos;
		}

		ret = do_splice_to(in, &offset, opipe, len, flags);

		if (!off_in)
			in->f_pos = offset;
		else if (copy_to_user(off_in, &offset, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	return -EINVAL;
}
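
/*
 * Userspace view of the offset handling above (illustrative): passing
 * a non-NULL offset splices from that position and leaves the file's
 * own position untouched, while a NULL offset uses and advances f_pos:
 *
 *	loff_t off = 4096;
 *
 *	splice(file_fd, &off, pipe_wfd, NULL, 8192, 0);
 *
 * after which 'off' has advanced by the number of bytes spliced and
 * the f_pos of file_fd is unchanged.
 */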

/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial, bool aligned,
				unsigned int pipe_buffers)
{
	int buffers = 0, error = 0;

	while (nr_vecs) {
		unsigned long off, npages;
		struct iovec entry;
		void __user *base;
		size_t len;
		int i;

		error = -EFAULT;
		if (copy_from_user(&entry, iov, sizeof(entry)))
			break;

		base = entry.iov_base;
		len = entry.iov_len;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		error = 0;
		if (unlikely(!len))
			break;
		error = -EFAULT;
		if (!access_ok(VERIFY_READ, base, len))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;

		/*
		 * If asked for alignment, the offset must be zero and the
		 * length a multiple of the PAGE_SIZE.
		 */
		error = -EINVAL;
		if (aligned && (off || len & ~PAGE_MASK))
			break;

		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > pipe_buffers - buffers)
			npages = pipe_buffers - buffers;

		error = get_user_pages_fast((unsigned long)base, npages,
					0, &pages[buffers]);

		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE - off);

			partial[buffers].offset = off;
			partial[buffers].len = plen;

			off = 0;
			len -= plen;
			buffers++;
		}

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
		if (len)
			break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == pipe_buffers)
			break;

		nr_vecs--;
		iov++;
	}

	if (buffers)
		return buffers;

	return error;
}

static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	char *src;
	int ret;

	/*
	 * See if we can use the atomic maps, by prefaulting in the
	 * pages and doing an atomic copy
	 */
	if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
		src = buf->ops->map(pipe, buf, 1);
		ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
							sd->len);
		buf->ops->unmap(pipe, buf, src);
		if (!ret) {
			ret = sd->len;
			goto out;
		}
	}

	/*
	 * No dice, use slow non-atomic map and copy
	 */
	src = buf->ops->map(pipe, buf, 0);

	ret = sd->len;
	if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
		ret = -EFAULT;

	buf->ops->unmap(pipe, buf, src);
out:
	if (ret > 0)
		sd->u.userptr += ret;
	return ret;
}

/*
 * For lack of a better implementation, implement vmsplice() to userspace
 * as a simple copy of the pipe's pages to the user iov.
 */
static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct splice_desc sd;
	ssize_t size;
	int error;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	pipe_lock(pipe);

	error = ret = 0;
	while (nr_segs) {
		void __user *base;
		size_t len;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		if (unlikely(!base)) {
			error = -EFAULT;
			break;
		}

		if (unlikely(!access_ok(VERIFY_WRITE, base, len))) {
			error = -EFAULT;
			break;
		}

		sd.len = 0;
		sd.total_len = len;
		sd.flags = flags;
		sd.u.userptr = base;
		sd.pos = 0;

		size = __splice_from_pipe(pipe, &sd, pipe_to_user);
		if (size < 0) {
			if (!ret)
				ret = size;

			break;
		}

		ret += size;

		if (size < len)
			break;

		nr_segs--;
		iov++;
	}

	pipe_unlock(pipe);

	if (!ret)
		ret = error;

	return ret;
}

/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 */
static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &user_page_pipe_buf_ops,
		.spd_release = spd_release_page,
	};
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
					    spd.partial, false,
					    spd.nr_pages_max);
	if (spd.nr_pages <= 0)
		ret = spd.nr_pages;
	else
		ret = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);
	return ret;
}

/*
 * Note that vmsplice only really supports true splicing _from_ user memory
 * to a pipe, not the other way around. Splicing from user memory is a simple
 * operation that can be supported without any funky alignment restrictions
 * or nasty vm tricks. We simply map in the user memory and fill it into
 * a pipe. The reverse isn't quite as easy, though. There are two possible
 * solutions for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (it
 *	  has restrictive limitations on both ends of the pipe).
 *
 * Currently we punt and implement it as a normal copy, see pipe_to_user().
 *
 */
SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov,
		unsigned long, nr_segs, unsigned int, flags)
{
	struct fd f;
	long error;

	if (unlikely(nr_segs > UIO_MAXIOV))
		return -EINVAL;
	else if (unlikely(!nr_segs))
		return 0;

	error = -EBADF;
	f = fdget(fd);
	if (f.file) {
		if (f.file->f_mode & FMODE_WRITE)
			error = vmsplice_to_pipe(f.file, iov, nr_segs, flags);
		else if (f.file->f_mode & FMODE_READ)
			error = vmsplice_to_user(f.file, iov, nr_segs, flags);

		fdput(f);
	}

	return error;
}
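
/*
 * Illustrative userspace use of vmsplice(2): gift a page-aligned user
 * buffer to a pipe. 'buf' is assumed to be page-aligned and 'pfd' an
 * open pipe; with SPLICE_F_GIFT the kernel may later steal the pages
 * instead of copying them (see user_page_pipe_buf_steal() above).
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *
 *	vmsplice(pfd[1], &iov, 1, SPLICE_F_GIFT);
 */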

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(vmsplice, int, fd, const struct compat_iovec __user *, iov32,
		    unsigned int, nr_segs, unsigned int, flags)
{
	unsigned i;
	struct iovec __user *iov;
	if (nr_segs > UIO_MAXIOV)
		return -EINVAL;
	iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec));
	for (i = 0; i < nr_segs; i++) {
		struct compat_iovec v;
		if (get_user(v.iov_base, &iov32[i].iov_base) ||
		    get_user(v.iov_len, &iov32[i].iov_len) ||
		    put_user(compat_ptr(v.iov_base), &iov[i].iov_base) ||
		    put_user(v.iov_len, &iov[i].iov_len))
			return -EFAULT;
	}
	return sys_vmsplice(fd, iov, nr_segs, flags);
}
#endif

SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
		int, fd_out, loff_t __user *, off_out,
		size_t, len, unsigned int, flags)
{
	struct fd in, out;
	long error;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fdget(fd_in);
	if (in.file) {
		if (in.file->f_mode & FMODE_READ) {
			out = fdget(fd_out);
			if (out.file) {
				if (out.file->f_mode & FMODE_WRITE)
					error = do_splice(in.file, off_in,
							  out.file, off_out,
							  len, flags);
				fdput(out);
			}
		}
		fdput(in);
	}
	return error;
}

/*
 * Make sure there's data to read. Wait for input if we can, otherwise
 * return an appropriate error.
 */
static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	 */
	if (pipe->nrbufs)
		return 0;

	ret = 0;
	pipe_lock(pipe);

	while (!pipe->nrbufs) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		pipe_wait(pipe);
	}

	pipe_unlock(pipe);
	return ret;
}

/*
 * Make sure there's writable room. Wait for room if we can, otherwise
 * return an appropriate error.
 */
static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	 */
	if (pipe->nrbufs < pipe->buffers)
		return 0;

	ret = 0;
	pipe_lock(pipe);

	while (pipe->nrbufs >= pipe->buffers) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			ret = -EPIPE;
			break;
		}
		if (flags & SPLICE_F_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	pipe_unlock(pipe);
	return ret;
}

/*
 * Splice contents of ipipe to opipe.
 */
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
			       struct pipe_inode_info *opipe,
			       size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, nbuf;
	bool input_wakeup = false;


retry:
	ret = ipipe_prep(ipipe, flags);
	if (ret)
		return ret;

	ret = opipe_prep(opipe, flags);
	if (ret)
		return ret;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by pipe info address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	pipe_double_lock(ipipe, opipe);

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (!ipipe->nrbufs && !ipipe->writers)
			break;

		/*
		 * Cannot make any progress, because either the input
		 * pipe is empty or the output pipe is full.
		 */
		if (!ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) {
			/* Already processed some buffers, break */
			if (ret)
				break;

			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			/*
			 * We raced with another reader/writer and haven't
			 * managed to process any buffers. A zero return
			 * value means EOF, so retry instead.
			 */
			pipe_unlock(ipipe);
			pipe_unlock(opipe);
			goto retry;
		}

		ibuf = ipipe->bufs + ipipe->curbuf;
		nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
		obuf = opipe->bufs + nbuf;

		if (len >= ibuf->len) {
			/*
			 * Simply move the whole buffer from ipipe to opipe
			 */
			*obuf = *ibuf;
			ibuf->ops = NULL;
			opipe->nrbufs++;
			ipipe->curbuf = (ipipe->curbuf + 1) & (ipipe->buffers - 1);
			ipipe->nrbufs--;
			input_wakeup = true;
		} else {
			/*
			 * Get a reference to this pipe buffer,
			 * so we can copy the contents over.
			 */
			ibuf->ops->get(ipipe, ibuf);
			*obuf = *ibuf;

			/*
			 * Don't inherit the gift flag, we need to
			 * prevent multiple steals of this page.
			 */
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

			obuf->len = len;
			opipe->nrbufs++;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		ret += obuf->len;
		len -= obuf->len;
	} while (len);

	pipe_unlock(ipipe);
	pipe_unlock(opipe);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
	if (ret > 0)
		wakeup_pipe_readers(opipe);

	if (input_wakeup)
		wakeup_pipe_writers(ipipe);

	return ret;
}

/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, i = 0, nbuf;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by pipe info address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	pipe_double_lock(ipipe, opipe);

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		/*
		 * If we have iterated all input buffers or run out of
		 * output room, break.
		 */
		if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers)
			break;

		ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1));
		nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);

		/*
		 * Get a reference to this pipe buffer,
		 * so we can copy the contents over.
		 */
		ibuf->ops->get(ipipe, ibuf);

		obuf = opipe->bufs + nbuf;
		*obuf = *ibuf;

		/*
		 * Don't inherit the gift flag, we need to
		 * prevent multiple steals of this page.
		 */
		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

		if (obuf->len > len)
			obuf->len = len;

		opipe->nrbufs++;
		ret += obuf->len;
		len -= obuf->len;
		i++;
	} while (len);

	/*
	 * return EAGAIN if we have the potential of some data in the
	 * future, otherwise just return 0
	 */
	if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
		ret = -EAGAIN;

	pipe_unlock(ipipe);
	pipe_unlock(opipe);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
	if (ret > 0)
		wakeup_pipe_readers(opipe);

	return ret;
}
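
/*
 * Userspace sketch of the intended tee(2) usage (illustrative, adapted
 * from the classic example): duplicate stdin's pipe contents to stdout
 * while also splicing them to a hypothetical log_fd:
 *
 *	len = tee(STDIN_FILENO, STDOUT_FILENO, INT_MAX, SPLICE_F_NONBLOCK);
 *	if (len > 0)
 *		splice(STDIN_FILENO, NULL, log_fd, NULL, len, 0);
 */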

/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = get_pipe_info(in);
	struct pipe_inode_info *opipe = get_pipe_info(out);
	int ret = -EINVAL;

	/*
	 * Duplicate the contents of ipipe to opipe without actually
	 * copying the data.
	 */
	if (ipipe && opipe && ipipe != opipe) {
		/*
		 * Keep going, unless we encounter an error. The ipipe/opipe
		 * ordering doesn't really matter.
		 */
		ret = ipipe_prep(ipipe, flags);
		if (!ret) {
			ret = opipe_prep(opipe, flags);
			if (!ret)
				ret = link_pipe(ipipe, opipe, len, flags);
		}
	}

	return ret;
}

SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
{
	struct fd in;
	int error;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fdget(fdin);
	if (in.file) {
		if (in.file->f_mode & FMODE_READ) {
			struct fd out = fdget(fdout);
			if (out.file) {
				if (out.file->f_mode & FMODE_WRITE)
					error = do_tee(in.file, out.file,
							len, flags);
				fdput(out);
			}
		}
		fdput(in);
	}

	return error;
}