/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files, network, direct splicing, etc and
 * fixing lots of bugs.
 *
 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
 *
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
#include <linux/security.h>
#include <linux/gfp.h>
#include <linux/socket.h>
#include <linux/compat.h>
#include "internal.h"
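/*
 * Illustrative sketch (not part of the kernel build): the "extended pipe"
 * model described above, as seen from user space. A pipe acts as the
 * in-kernel buffer between two splice(2) calls, e.g. file -> pipe ->
 * socket. Error handling is elided; 'file_fd' and 'sock_fd' are assumed
 * to be open descriptors, and the program needs _GNU_SOURCE and <fcntl.h>.
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	for (;;) {
 *		ssize_t n = splice(file_fd, NULL, pfd[1], NULL,
 *				   65536, SPLICE_F_MORE | SPLICE_F_MOVE);
 *		if (n <= 0)
 *			break;
 *		while (n > 0) {
 *			ssize_t m = splice(pfd[0], NULL, sock_fd, NULL,
 *					   n, SPLICE_F_MORE | SPLICE_F_MOVE);
 *			if (m <= 0)
 *				break;
 *			n -= m;
 *		}
 *	}
 */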
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping;

	lock_page(page);

	mapping = page_mapping(page);
	if (mapping) {
		WARN_ON(!PageUptodate(page));

		/*
		 * At least for ext2 with nobh option, we need to wait on
		 * writeback completing on this page, since we'll remove it
		 * from the pagecache. Otherwise truncate won't wait on the
		 * page, allowing the disk blocks to be reused by someone else
		 * before we actually wrote our data to them. fs corruption
		 * ensues.
		 */
		wait_on_page_writeback(page);

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL))
			goto out_unlock;

		/*
		 * If we succeeded in removing the mapping, set LRU flag
		 * and return good.
		 */
		if (remove_mapping(mapping, page)) {
			buf->flags |= PIPE_BUF_FLAG_LRU;
			return 0;
		}
	}

	/*
	 * Raced with truncate or failed to remove page from current
	 * address space, unlock and return failure.
	 */
out_unlock:
	unlock_page(page);
	return 1;
}

static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
}

/*
 * Check whether the contents of buf are OK to access. Since the content
 * is a page cache page, IO may be in flight.
 */
static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
				       struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is OK after all, we are done.
		 */
		unlock_page(page);
	}

	return 0;
error:
	unlock_page(page);
	return err;
}

const struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = page_cache_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return generic_pipe_buf_steal(pipe, buf);
}

static const struct pipe_buf_operations user_page_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static void wakeup_pipe_readers(struct pipe_inode_info *pipe)
{
	smp_mb();
	if (waitqueue_active(&pipe->wait))
		wake_up_interruptible(&pipe->wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
/**
 * splice_to_pipe - fill passed data into a pipe
 * @pipe:	pipe to fill
 * @spd:	data to fill
 *
 * Description:
 *    @spd contains a map of pages and len/offset tuples, along with
 *    the struct pipe_buf_operations associated with these pages. This
 *    function will link that data to the pipe.
 *
 */
ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
		       struct splice_pipe_desc *spd)
{
	unsigned int spd_pages = spd->nr_pages;
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	pipe_lock(pipe);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->private = spd->partial[page_nr].private;
			buf->ops = spd->ops;
			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;

			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->files)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < pipe->buffers)
				continue;

			break;
		}

		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	pipe_unlock(pipe);

	if (do_wakeup)
		wakeup_pipe_readers(pipe);

	while (page_nr < spd_pages)
		spd->spd_release(spd, page_nr++);

	return ret;
}

void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
{
	page_cache_release(spd->pages[i]);
}

/*
 * Check if we need to grow the arrays holding pages and partial page
 * descriptions.
 */
int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
{
	unsigned int buffers = ACCESS_ONCE(pipe->buffers);

	spd->nr_pages_max = buffers;
	if (buffers <= PIPE_DEF_BUFFERS)
		return 0;

	spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
	spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);

	if (spd->pages && spd->partial)
		return 0;

	kfree(spd->pages);
	kfree(spd->partial);
	return -ENOMEM;
}

void splice_shrink_spd(struct splice_pipe_desc *spd)
{
	if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
		return;

	kfree(spd->pages);
	kfree(spd->partial);
}
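/*
 * Minimal sketch of the splice_pipe_desc pattern used by the callers
 * below (see __generic_file_splice_read() for the real thing). The
 * caller owns a reference on each page; splice_to_pipe() consumes the
 * pages it links into the pipe and the trailing spd_release() calls
 * drop the rest.
 *
 *	struct page *pages[PIPE_DEF_BUFFERS];
 *	struct partial_page partial[PIPE_DEF_BUFFERS];
 *	struct splice_pipe_desc spd = {
 *		.pages		= pages,
 *		.partial	= partial,
 *		.nr_pages_max	= PIPE_DEF_BUFFERS,
 *		.ops		= &page_cache_pipe_buf_ops,
 *		.spd_release	= spd_release_page,
 *	};
 *
 *	if (splice_grow_spd(pipe, &spd))
 *		return -ENOMEM;
 *	...fill spd.pages[]/spd.partial[] and set spd.nr_pages...
 *	ret = splice_to_pipe(pipe, &spd);
 *	splice_shrink_spd(&spd);
 */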
static int
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, spd.nr_pages_max);

	/*
	 * Lookup the (hopefully) full range of pages we need.
	 */
	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages);
	index += spd.nr_pages;

	/*
	 * If find_get_pages_contig() returned fewer pages than we needed,
	 * readahead/allocate the rest and fill in the holes.
	 */
	if (spd.nr_pages < nr_pages)
		page_cache_sync_readahead(mapping, &in->f_ra, in,
				index, req_pages - spd.nr_pages);

	error = 0;
	while (spd.nr_pages < nr_pages) {
		/*
		 * Page could be there, find_get_pages_contig() breaks on
		 * the first hole.
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * page didn't exist, allocate one.
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						GFP_KERNEL);
			if (unlikely(error)) {
				page_cache_release(page);
				if (error == -EEXIST)
					continue;
				break;
			}
			/*
			 * add_to_page_cache() locks the page, unlock it
			 * to avoid complicating the logic below even more.
			 */
			unlock_page(page);
		}

		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	/*
	 * Now loop over the map and see if we need to start IO on any
	 * pages, fill in the partial map, etc.
	 */
	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;
	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (PageReadahead(page))
			page_cache_async_readahead(mapping, &in->f_ra, in,
					page, index, req_pages - page_nr);

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			lock_page(page);

			/*
			 * Page was truncated, or invalidated by the
			 * filesystem. Redo the find/create, but this time the
			 * page is kept locked, so there's no chance of another
			 * race with truncate/invalidate.
			 */
			if (!page->mapping) {
				unlock_page(page);
				page = find_or_create_page(mapping, index,
						mapping_gfp_mask(mapping));

				if (!page) {
					error = -ENOMEM;
					break;
				}
				page_cache_release(spd.pages[page_nr]);
				spd.pages[page_nr] = page;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);
			if (unlikely(error)) {
				/*
				 * We really should re-lookup the page here,
				 * but it complicates things a lot. Instead
				 * let's just do what we already stored, and
				 * we'll get it the next time we are called.
				 */
				if (error == AOP_TRUNCATED_PAGE)
					error = 0;

				break;
			}
		}
fill_it:
		/*
		 * i_size must be checked after PageUptodate.
		 */
		isize = i_size_read(mapping->host);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		/*
		 * if this is the last page, see if we need to shrink
		 * the length and stop
		 */
		if (end_index == index) {
			unsigned int plen;

			/*
			 * max good bytes in this page
			 */
			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			/*
			 * force quit after adding this page
			 */
			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	/*
	 * Release any pages at the end, if we quit early. 'page_nr' is how far
	 * we got, 'nr_pages' is how many pages are in the map.
	 */
	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);
	in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);
	return error;
}
/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @ppos:	position in @in
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from the given file and fill them into a pipe. Can be
 *    used as long as the address_space operations for the source implement
 *    a readpage() hook.
 *
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	loff_t isize, left;
	int ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
	if (ret > 0) {
		*ppos += ret;
		file_accessed(in);
	}

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_read);
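/*
 * Filesystems that use the generic page cache paths can hook this up
 * verbatim in their file_operations; an illustrative sketch (the struct
 * name is made up, the field is real):
 *
 *	const struct file_operations example_fops = {
 *		...
 *		.splice_read	= generic_file_splice_read,
 *	};
 */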
static const struct pipe_buf_operations default_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
			    unsigned long vlen, loff_t offset)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	ssize_t res;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
	set_fs(old_fs);

	return res;
}

ssize_t kernel_write(struct file *file, const char *buf, size_t count,
		     loff_t pos)
{
	mm_segment_t old_fs;
	ssize_t res;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	res = vfs_write(file, (__force const char __user *)buf, count, &pos);
	set_fs(old_fs);

	return res;
}
EXPORT_SYMBOL(kernel_write);

ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	unsigned int nr_pages;
	unsigned int nr_freed;
	size_t offset;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct iovec *vec, __vec[PIPE_DEF_BUFFERS];
	ssize_t res;
	size_t this_len;
	int error;
	int i;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &default_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	res = -ENOMEM;
	vec = __vec;
	if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
		vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
		if (!vec)
			goto shrink_ret;
	}

	offset = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
		struct page *page;

		page = alloc_page(GFP_USER);
		error = -ENOMEM;
		if (!page)
			goto err;

		this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
		vec[i].iov_base = (void __user *) page_address(page);
		vec[i].iov_len = this_len;
		spd.pages[i] = page;
		spd.nr_pages++;
		len -= this_len;
		offset = 0;
	}

	res = kernel_readv(in, vec, spd.nr_pages, *ppos);
	if (res < 0) {
		error = res;
		goto err;
	}

	error = 0;
	if (!res)
		goto err;

	nr_freed = 0;
	for (i = 0; i < spd.nr_pages; i++) {
		this_len = min_t(size_t, vec[i].iov_len, res);
		spd.partial[i].offset = 0;
		spd.partial[i].len = this_len;
		if (!this_len) {
			__free_page(spd.pages[i]);
			spd.pages[i] = NULL;
			nr_freed++;
		}
		res -= this_len;
	}
	spd.nr_pages -= nr_freed;

	res = splice_to_pipe(pipe, &spd);
	if (res > 0)
		*ppos += res;

shrink_ret:
	if (vec != __vec)
		kfree(vec);
	splice_shrink_spd(&spd);
	return res;

err:
	for (i = 0; i < spd.nr_pages; i++)
		__free_page(spd.pages[i]);

	res = error;
	goto shrink_ret;
}
EXPORT_SYMBOL(default_file_splice_read);
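/*
 * Usage sketch for kernel_write() above (illustrative only): writing a
 * kernel buffer to an already-opened struct file from process context.
 * The caller is responsible for 'file' being open for writing.
 *
 *	const char msg[] = "hello";
 *	ssize_t n = kernel_write(file, msg, sizeof(msg) - 1, 0);
 *	if (n < 0)
 *		...handle error...
 */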
/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	loff_t pos = sd->pos;
	int more;

	if (!likely(file->f_op->sendpage))
		return -EINVAL;

	more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;

	if (sd->len < sd->total_len && pipe->nrbufs > 1)
		more |= MSG_SENDPAGE_NOTLAST;

	return file->f_op->sendpage(file, buf->page, buf->offset,
				    sd->len, &pos, more);
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		 struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	struct address_space *mapping = file->f_mapping;
	unsigned int offset, this_len;
	struct page *page;
	void *fsdata;
	int ret;

	offset = sd->pos & ~PAGE_CACHE_MASK;

	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;

	ret = pagecache_write_begin(file, mapping, sd->pos, this_len,
				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (unlikely(ret))
		goto out;

	if (buf->page != page) {
		char *src = buf->ops->map(pipe, buf, 1);
		char *dst = kmap_atomic(page);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst);
		buf->ops->unmap(pipe, buf, src);
	}
	ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
				page, fsdata);
out:
	return ret;
}
EXPORT_SYMBOL(pipe_to_file);
static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
{
	smp_mb();
	if (waitqueue_active(&pipe->wait))
		wake_up_interruptible(&pipe->wait);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}

/**
 * splice_from_pipe_feed - feed available data from a pipe to a file
 * @pipe:	pipe to splice from
 * @sd:		information to @actor
 * @actor:	handler that splices the data
 *
 * Description:
 *    This function loops over the pipe and calls @actor to do the
 *    actual moving of a single struct pipe_buffer to the desired
 *    destination. It returns when there are no more buffers left in
 *    the pipe or if the requested number of bytes (@sd->total_len)
 *    have been copied. It returns a positive number (one) if the
 *    pipe needs to be filled with more data, zero if the required
 *    number of bytes have been copied and -errno on error.
 *
 *    This, together with splice_from_pipe_{begin,end,next}, may be
 *    used to implement the functionality of __splice_from_pipe() when
 *    locking is required around copying the pipe buffers to the
 *    destination.
 */
int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
			  splice_actor *actor)
{
	int ret;

	while (pipe->nrbufs) {
		struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
		const struct pipe_buf_operations *ops = buf->ops;

		sd->len = buf->len;
		if (sd->len > sd->total_len)
			sd->len = sd->total_len;

		ret = buf->ops->confirm(pipe, buf);
		if (unlikely(ret)) {
			if (ret == -ENODATA)
				ret = 0;
			return ret;
		}

		ret = actor(pipe, buf, sd);
		if (ret <= 0)
			return ret;

		buf->offset += ret;
		buf->len -= ret;

		sd->num_spliced += ret;
		sd->len -= ret;
		sd->pos += ret;
		sd->total_len -= ret;

		if (!buf->len) {
			buf->ops = NULL;
			ops->release(pipe, buf);
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
			if (pipe->files)
				sd->need_wakeup = true;
		}

		if (!sd->total_len)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL(splice_from_pipe_feed);

/**
 * splice_from_pipe_next - wait for some data to splice from
 * @pipe:	pipe to splice from
 * @sd:		information about the splice operation
 *
 * Description:
 *    This function will wait for some data and return a positive
 *    value (one) if pipe buffers are available. It will return zero
 *    or -errno if no more data needs to be spliced.
 */
int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	while (!pipe->nrbufs) {
		if (!pipe->writers)
			return 0;

		if (!pipe->waiting_writers && sd->num_spliced)
			return 0;

		if (sd->flags & SPLICE_F_NONBLOCK)
			return -EAGAIN;

		if (signal_pending(current))
			return -ERESTARTSYS;

		if (sd->need_wakeup) {
			wakeup_pipe_writers(pipe);
			sd->need_wakeup = false;
		}

		pipe_wait(pipe);
	}

	return 1;
}
EXPORT_SYMBOL(splice_from_pipe_next);

/**
 * splice_from_pipe_begin - start splicing from pipe
 * @sd:		information about the splice operation
 *
 * Description:
 *    This function should be called before a loop containing
 *    splice_from_pipe_next() and splice_from_pipe_feed() to
 *    initialize the necessary fields of @sd.
 */
void splice_from_pipe_begin(struct splice_desc *sd)
{
	sd->num_spliced = 0;
	sd->need_wakeup = false;
}
EXPORT_SYMBOL(splice_from_pipe_begin);

/**
 * splice_from_pipe_end - finish splicing from pipe
 * @pipe:	pipe to splice from
 * @sd:		information about the splice operation
 *
 * Description:
 *    This function will wake up pipe writers if necessary. It should
 *    be called after a loop containing splice_from_pipe_next() and
 *    splice_from_pipe_feed().
 */
void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
	if (sd->need_wakeup)
		wakeup_pipe_writers(pipe);
}
EXPORT_SYMBOL(splice_from_pipe_end);
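/*
 * The begin/next/feed/end helpers above compose into the canonical
 * splice-from-pipe loop; a sketch of a caller that needs to take its
 * own lock around the feed step (generic_file_splice_write() below is
 * the in-tree example of this pattern):
 *
 *	splice_from_pipe_begin(&sd);
 *	do {
 *		ret = splice_from_pipe_next(pipe, &sd);
 *		if (ret <= 0)
 *			break;
 *		...take subsystem lock...
 *		ret = splice_from_pipe_feed(pipe, &sd, actor);
 *		...drop subsystem lock...
 *	} while (ret > 0);
 *	splice_from_pipe_end(pipe, &sd);
 */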
/**
 * __splice_from_pipe - splice data from a pipe to given actor
 * @pipe:	pipe to splice from
 * @sd:		information to @actor
 * @actor:	handler that splices the data
 *
 * Description:
 *    This function does little more than loop over the pipe and call
 *    @actor to do the actual moving of a single struct pipe_buffer to
 *    the desired destination. See pipe_to_file, pipe_to_sendpage, or
 *    pipe_to_user.
 *
 */
ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
			   splice_actor *actor)
{
	int ret;

	splice_from_pipe_begin(sd);
	do {
		ret = splice_from_pipe_next(pipe, sd);
		if (ret > 0)
			ret = splice_from_pipe_feed(pipe, sd, actor);
	} while (ret > 0);
	splice_from_pipe_end(pipe, sd);

	return sd->num_spliced ? sd->num_spliced : ret;
}
EXPORT_SYMBOL(__splice_from_pipe);

/**
 * splice_from_pipe - splice data from a pipe to a file
 * @pipe:	pipe to splice from
 * @out:	file to splice to
 * @ppos:	position in @out
 * @len:	how many bytes to splice
 * @flags:	splice modifier flags
 * @actor:	handler that splices the data
 *
 * Description:
 *    See __splice_from_pipe. This function locks the pipe inode,
 *    otherwise it's identical to __splice_from_pipe().
 *
 */
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	ssize_t ret;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};

	pipe_lock(pipe);
	ret = __splice_from_pipe(pipe, &sd, actor);
	pipe_unlock(pipe);

	return ret;
}

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will either move or copy pages (determined by @flags options) from
 *    the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};
	ssize_t ret;

	pipe_lock(pipe);

	splice_from_pipe_begin(&sd);
	do {
		ret = splice_from_pipe_next(pipe, &sd);
		if (ret <= 0)
			break;

		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
		ret = file_remove_suid(out);
		if (!ret) {
			ret = file_update_time(out);
			if (!ret)
				ret = splice_from_pipe_feed(pipe, &sd,
							    pipe_to_file);
		}
		mutex_unlock(&inode->i_mutex);
	} while (ret > 0);
	splice_from_pipe_end(pipe, &sd);

	pipe_unlock(pipe);

	if (sd.num_spliced)
		ret = sd.num_spliced;

	if (ret > 0) {
		int err;

		err = generic_write_sync(out, *ppos, ret);
		if (err)
			ret = err;
		else
			*ppos += ret;
		balance_dirty_pages_ratelimited(mapping);
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);
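/*
 * As with generic_file_splice_read(), filesystems wire this in through
 * their file_operations; an illustrative sketch:
 *
 *	const struct file_operations example_fops = {
 *		...
 *		.splice_write	= generic_file_splice_write,
 *	};
 */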
static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			  struct splice_desc *sd)
{
	int ret;
	void *data;
	loff_t tmp = sd->pos;

	data = buf->ops->map(pipe, buf, 0);
	ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
	buf->ops->unmap(pipe, buf, data);

	return ret;
}

static ssize_t default_file_splice_write(struct pipe_inode_info *pipe,
					 struct file *out, loff_t *ppos,
					 size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf);
	if (ret > 0)
		*ppos += ret;

	return ret;
}

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will send @len bytes from the pipe to a network socket. No data copying
 *    is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
				loff_t *, size_t, unsigned int);

	if (out->f_op->splice_write)
		splice_write = out->f_op->splice_write;
	else
		splice_write = default_file_splice_write;

	return splice_write(pipe, out, ppos, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	ssize_t (*splice_read)(struct file *, loff_t *,
			       struct pipe_inode_info *, size_t, unsigned int);
	int ret;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	if (in->f_op->splice_read)
		splice_read = in->f_op->splice_read;
	else
		splice_read = default_file_splice_read;

	return splice_read(in, ppos, pipe, len, flags);
}
/**
 * splice_direct_to_actor - splices data directly between two non-pipes
 * @in:		file to splice from
 * @sd:		actor information on where to splice to
 * @actor:	handles the data splicing
 *
 * Description:
 *    This is a special case helper to splice directly between two
 *    points, without requiring an explicit pipe. Internally an allocated
 *    pipe is cached in the process, and reused during the lifetime of
 *    that process.
 *
 */
ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
			       splice_direct_actor *actor)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	umode_t i_mode;
	size_t len;
	int i, flags;

	/*
	 * We require the input to be a regular file, as we don't want to
	 * randomly drop data for eg socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = file_inode(in)->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info();
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;
	len = sd->total_len;
	flags = sd->flags;

	/*
	 * Don't block on output, we have to drain the direct pipe.
	 */
	sd->flags &= ~SPLICE_F_NONBLOCK;

	while (len) {
		size_t read_len;
		loff_t pos = sd->pos, prev_pos = pos;

		ret = do_splice_to(in, &pos, pipe, len, flags);
		if (unlikely(ret <= 0))
			goto out_release;

		read_len = ret;
		sd->total_len = read_len;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = actor(pipe, sd);
		if (unlikely(ret <= 0)) {
			sd->pos = prev_pos;
			goto out_release;
		}

		bytes += ret;
		len -= ret;
		sd->pos = pos;

		if (ret < read_len) {
			sd->pos = prev_pos + ret;
			goto out_release;
		}
	}

done:
	pipe->nrbufs = pipe->curbuf = 0;
	file_accessed(in);
	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}

	if (!bytes)
		bytes = ret;

	goto done;
}
EXPORT_SYMBOL(splice_direct_to_actor);
static int direct_splice_actor(struct pipe_inode_info *pipe,
			       struct splice_desc *sd)
{
	struct file *file = sd->u.file;

	return do_splice_from(pipe, file, sd->opos, sd->total_len,
			      sd->flags);
}

/**
 * do_splice_direct - splices data directly between two files
 * @in:		file to splice from
 * @ppos:	input file offset
 * @out:	file to splice to
 * @opos:	output file offset
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    For use by do_sendfile(). splice can easily emulate sendfile, but
 *    doing it in the application would incur an extra system call
 *    (splice in + splice out, as compared to just sendfile()). So this helper
 *    can splice directly through a process-private pipe.
 *
 */
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      loff_t *opos, size_t len, unsigned int flags)
{
	struct splice_desc sd = {
		.len		= len,
		.total_len	= len,
		.flags		= flags,
		.pos		= *ppos,
		.u.file		= out,
		.opos		= opos,
	};
	long ret;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	if (unlikely(out->f_flags & O_APPEND))
		return -EINVAL;

	ret = rw_verify_area(WRITE, out, opos, len);
	if (unlikely(ret < 0))
		return ret;

	ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
	if (ret > 0)
		*ppos = sd.pos;

	return ret;
}
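/*
 * Call sketch (illustrative): do_sendfile() in fs/read_write.c drives
 * this helper roughly as follows, with the input and output offsets
 * validated beforehand:
 *
 *	retval = do_splice_direct(in.file, &pos, out.file, &out_pos,
 *				  count, fl);
 */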
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
			       struct pipe_inode_info *opipe,
			       size_t len, unsigned int flags);

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *ipipe;
	struct pipe_inode_info *opipe;
	loff_t offset;
	long ret;

	ipipe = get_pipe_info(in);
	opipe = get_pipe_info(out);

	if (ipipe && opipe) {
		if (off_in || off_out)
			return -ESPIPE;

		if (!(in->f_mode & FMODE_READ))
			return -EBADF;

		if (!(out->f_mode & FMODE_WRITE))
			return -EBADF;

		/* Splicing to self would be fun, but... */
		if (ipipe == opipe)
			return -EINVAL;

		return splice_pipe_to_pipe(ipipe, opipe, len, flags);
	}

	if (ipipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (!(out->f_mode & FMODE_PWRITE))
				return -EINVAL;
			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
				return -EFAULT;
		} else {
			offset = out->f_pos;
		}

		if (unlikely(!(out->f_mode & FMODE_WRITE)))
			return -EBADF;

		if (unlikely(out->f_flags & O_APPEND))
			return -EINVAL;

		ret = rw_verify_area(WRITE, out, &offset, len);
		if (unlikely(ret < 0))
			return ret;

		file_start_write(out);
		ret = do_splice_from(ipipe, out, &offset, len, flags);
		file_end_write(out);

		if (!off_out)
			out->f_pos = offset;
		else if (copy_to_user(off_out, &offset, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	if (opipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (!(in->f_mode & FMODE_PREAD))
				return -EINVAL;
			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
				return -EFAULT;
		} else {
			offset = in->f_pos;
		}

		ret = do_splice_to(in, &offset, opipe, len, flags);

		if (!off_in)
			in->f_pos = offset;
		else if (copy_to_user(off_in, &offset, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	return -EINVAL;
}
/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our own pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial, bool aligned,
				unsigned int pipe_buffers)
{
	int buffers = 0, error = 0;

	while (nr_vecs) {
		unsigned long off, npages;
		struct iovec entry;
		void __user *base;
		size_t len;
		int i;

		error = -EFAULT;
		if (copy_from_user(&entry, iov, sizeof(entry)))
			break;

		base = entry.iov_base;
		len = entry.iov_len;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		error = 0;
		if (unlikely(!len))
			break;
		error = -EFAULT;
		if (!access_ok(VERIFY_READ, base, len))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;

		/*
		 * If asked for alignment, the offset must be zero and the
		 * length a multiple of the PAGE_SIZE.
		 */
		error = -EINVAL;
		if (aligned && (off || len & ~PAGE_MASK))
			break;

		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > pipe_buffers - buffers)
			npages = pipe_buffers - buffers;

		error = get_user_pages_fast((unsigned long)base, npages,
					0, &pages[buffers]);

		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE - off);

			partial[buffers].offset = off;
			partial[buffers].len = plen;

			off = 0;
			len -= plen;
			buffers++;
		}

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
		if (len)
			break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == pipe_buffers)
			break;

		nr_vecs--;
		iov++;
	}

	if (buffers)
		return buffers;

	return error;
}

static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	char *src;
	int ret;

	/*
	 * See if we can use the atomic maps, by prefaulting in the
	 * pages and doing an atomic copy
	 */
	if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
		src = buf->ops->map(pipe, buf, 1);
		ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
							sd->len);
		buf->ops->unmap(pipe, buf, src);
		if (!ret) {
			ret = sd->len;
			goto out;
		}
	}

	/*
	 * No dice, use slow non-atomic map and copy
	 */
	src = buf->ops->map(pipe, buf, 0);

	ret = sd->len;
	if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
		ret = -EFAULT;

	buf->ops->unmap(pipe, buf, src);
out:
	if (ret > 0)
		sd->u.userptr += ret;
	return ret;
}
/*
 * For lack of a better implementation, implement vmsplice() to userspace
 * as a simple copy of the pipe's pages to the user iov.
 */
static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct splice_desc sd;
	ssize_t size;
	int error;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	pipe_lock(pipe);

	error = ret = 0;
	while (nr_segs) {
		void __user *base;
		size_t len;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		if (unlikely(!base)) {
			error = -EFAULT;
			break;
		}

		if (unlikely(!access_ok(VERIFY_WRITE, base, len))) {
			error = -EFAULT;
			break;
		}

		sd.len = 0;
		sd.total_len = len;
		sd.flags = flags;
		sd.u.userptr = base;
		sd.pos = 0;

		size = __splice_from_pipe(pipe, &sd, pipe_to_user);
		if (size < 0) {
			if (!ret)
				ret = size;

			break;
		}

		ret += size;

		if (size < len)
			break;

		nr_segs--;
		iov++;
	}

	pipe_unlock(pipe);

	if (!ret)
		ret = error;

	return ret;
}

/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 */
static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &user_page_pipe_buf_ops,
		.spd_release = spd_release_page,
	};
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
					    spd.partial, false,
					    spd.nr_pages_max);
	if (spd.nr_pages <= 0)
		ret = spd.nr_pages;
	else
		ret = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);
	return ret;
}

/*
 * Note that vmsplice only really supports true splicing _from_ user memory
 * to a pipe, not the other way around. Splicing from user memory is a simple
 * operation that can be supported without any funky alignment restrictions
 * or nasty vm tricks. We simply map in the user memory and fill it into
 * a pipe. The reverse isn't quite as easy, though. There are two possible
 * solutions for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (they
 *	  impose restrictions on both ends of the pipe).
 *
 * Currently we punt and implement it as a normal copy, see pipe_to_user().
 *
 */
SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov,
		unsigned long, nr_segs, unsigned int, flags)
{
	struct fd f;
	long error;

	if (unlikely(nr_segs > UIO_MAXIOV))
		return -EINVAL;
	else if (unlikely(!nr_segs))
		return 0;

	error = -EBADF;
	f = fdget(fd);
	if (f.file) {
		if (f.file->f_mode & FMODE_WRITE)
			error = vmsplice_to_pipe(f.file, iov, nr_segs, flags);
		else if (f.file->f_mode & FMODE_READ)
			error = vmsplice_to_user(f.file, iov, nr_segs, flags);

		fdput(f);
	}

	return error;
}
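/*
 * User-space view (illustrative): gifting user pages into a pipe with
 * vmsplice(2). With SPLICE_F_GIFT and page-aligned iovecs the pages may
 * later be stolen by a subsequent splice(2) with SPLICE_F_MOVE; the
 * buffer must not be reused after gifting. 'aligned_buf' and 'buf_len'
 * are assumptions of the sketch.
 *
 *	struct iovec iov = {
 *		.iov_base = aligned_buf,	// page-aligned allocation
 *		.iov_len  = buf_len,		// multiple of the page size
 *	};
 *	ssize_t n = vmsplice(pfd[1], &iov, 1, SPLICE_F_GIFT);
 */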
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(vmsplice, int, fd, const struct compat_iovec __user *, iov32,
		    unsigned int, nr_segs, unsigned int, flags)
{
	unsigned i;
	struct iovec __user *iov;
	if (nr_segs > UIO_MAXIOV)
		return -EINVAL;
	iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec));
	for (i = 0; i < nr_segs; i++) {
		struct compat_iovec v;
		if (get_user(v.iov_base, &iov32[i].iov_base) ||
		    get_user(v.iov_len, &iov32[i].iov_len) ||
		    put_user(compat_ptr(v.iov_base), &iov[i].iov_base) ||
		    put_user(v.iov_len, &iov[i].iov_len))
			return -EFAULT;
	}
	return sys_vmsplice(fd, iov, nr_segs, flags);
}
#endif

SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
		int, fd_out, loff_t __user *, off_out,
		size_t, len, unsigned int, flags)
{
	struct fd in, out;
	long error;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fdget(fd_in);
	if (in.file) {
		if (in.file->f_mode & FMODE_READ) {
			out = fdget(fd_out);
			if (out.file) {
				if (out.file->f_mode & FMODE_WRITE)
					error = do_splice(in.file, off_in,
							  out.file, off_out,
							  len, flags);
				fdput(out);
			}
		}
		fdput(in);
	}
	return error;
}

/*
 * Make sure there's data to read. Wait for input if we can, otherwise
 * return an appropriate error.
 */
static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	 */
	if (pipe->nrbufs)
		return 0;

	ret = 0;
	pipe_lock(pipe);

	while (!pipe->nrbufs) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		pipe_wait(pipe);
	}

	pipe_unlock(pipe);
	return ret;
}

/*
 * Make sure there's writeable room. Wait for room if we can, otherwise
 * return an appropriate error.
 */
static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	 */
	if (pipe->nrbufs < pipe->buffers)
		return 0;

	ret = 0;
	pipe_lock(pipe);

	while (pipe->nrbufs >= pipe->buffers) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			ret = -EPIPE;
			break;
		}
		if (flags & SPLICE_F_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	pipe_unlock(pipe);
	return ret;
}
/*
 * Splice contents of ipipe to opipe.
 */
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
			       struct pipe_inode_info *opipe,
			       size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, nbuf;
	bool input_wakeup = false;


retry:
	ret = ipipe_prep(ipipe, flags);
	if (ret)
		return ret;

	ret = opipe_prep(opipe, flags);
	if (ret)
		return ret;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by pipe info address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	pipe_double_lock(ipipe, opipe);

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (!ipipe->nrbufs && !ipipe->writers)
			break;

		/*
		 * Cannot make any progress, because either the input
		 * pipe is empty or the output pipe is full.
		 */
		if (!ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) {
			/* Already processed some buffers, break */
			if (ret)
				break;

			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			/*
			 * We raced with another reader/writer and haven't
			 * managed to process any buffers. A zero return
			 * value means EOF, so retry instead.
			 */
			pipe_unlock(ipipe);
			pipe_unlock(opipe);
			goto retry;
		}

		ibuf = ipipe->bufs + ipipe->curbuf;
		nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
		obuf = opipe->bufs + nbuf;

		if (len >= ibuf->len) {
			/*
			 * Simply move the whole buffer from ipipe to opipe
			 */
			*obuf = *ibuf;
			ibuf->ops = NULL;
			opipe->nrbufs++;
			ipipe->curbuf = (ipipe->curbuf + 1) & (ipipe->buffers - 1);
			ipipe->nrbufs--;
			input_wakeup = true;
		} else {
			/*
			 * Get a reference to this pipe buffer,
			 * so we can copy the contents over.
			 */
			ibuf->ops->get(ipipe, ibuf);
			*obuf = *ibuf;

			/*
			 * Don't inherit the gift flag, we need to
			 * prevent multiple steals of this page.
			 */
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

			obuf->len = len;
			opipe->nrbufs++;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		ret += obuf->len;
		len -= obuf->len;
	} while (len);

	pipe_unlock(ipipe);
	pipe_unlock(opipe);

	/*
	 * If we put data in the output pipe, wake up any potential readers.
	 */
	if (ret > 0)
		wakeup_pipe_readers(opipe);

	if (input_wakeup)
		wakeup_pipe_writers(ipipe);

	return ret;
}

/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, i = 0, nbuf;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by pipe info address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	pipe_double_lock(ipipe, opipe);

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		/*
		 * If we have iterated all input buffers or run out of
		 * output room, break.
		 */
		if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers)
			break;

		ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1));
		nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);

		/*
		 * Get a reference to this pipe buffer,
		 * so we can copy the contents over.
		 */
		ibuf->ops->get(ipipe, ibuf);

		obuf = opipe->bufs + nbuf;
		*obuf = *ibuf;

		/*
		 * Don't inherit the gift flag, we need to
		 * prevent multiple steals of this page.
		 */
		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

		if (obuf->len > len)
			obuf->len = len;

		opipe->nrbufs++;
		ret += obuf->len;
		len -= obuf->len;
		i++;
	} while (len);

	/*
	 * return EAGAIN if we have the potential of some data in the
	 * future, otherwise just return 0
	 */
	if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
		ret = -EAGAIN;

	pipe_unlock(ipipe);
	pipe_unlock(opipe);

	/*
	 * If we put data in the output pipe, wake up any potential readers.
	 */
	if (ret > 0)
		wakeup_pipe_readers(opipe);

	return ret;
}

/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = get_pipe_info(in);
	struct pipe_inode_info *opipe = get_pipe_info(out);
	int ret = -EINVAL;

	/*
	 * Duplicate the contents of ipipe to opipe without actually
	 * copying the data.
	 */
	if (ipipe && opipe && ipipe != opipe) {
		/*
		 * Keep going, unless we encounter an error. The ipipe/opipe
		 * ordering doesn't really matter.
		 */
		ret = ipipe_prep(ipipe, flags);
		if (!ret) {
			ret = opipe_prep(opipe, flags);
			if (!ret)
				ret = link_pipe(ipipe, opipe, len, flags);
		}
	}

	return ret;
}

SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
{
	struct fd in;
	int error;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fdget(fdin);
	if (in.file) {
		if (in.file->f_mode & FMODE_READ) {
			struct fd out = fdget(fdout);
			if (out.file) {
				if (out.file->f_mode & FMODE_WRITE)
					error = do_tee(in.file, out.file,
						       len, flags);
				fdput(out);
			}
		}
		fdput(in);
	}

	return error;
}
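/*
 * User-space view of tee(2) (illustrative): duplicate stdin's pipe
 * contents to stdout while also splicing them to a log file, without
 * copying the data through user memory. Error handling is elided;
 * 'log_fd' is assumed to be an open regular file, and stdin/stdout
 * must both be pipes.
 *
 *	for (;;) {
 *		ssize_t n = tee(STDIN_FILENO, STDOUT_FILENO,
 *				INT_MAX, SPLICE_F_NONBLOCK);
 *		if (n < 0 && errno == EAGAIN)
 *			continue;
 *		if (n <= 0)
 *			break;
 *		while (n > 0) {
 *			ssize_t m = splice(STDIN_FILENO, NULL, log_fd,
 *					   NULL, n, SPLICE_F_MOVE);
 *			if (m <= 0)
 *				break;
 *			n -= m;
 *		}
 *	}
 */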