/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files, network, direct splicing, etc and
 * fixing lots of bugs.
 *
 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
 *
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>

/*
 * Describes the valid region of a single page in a splice page map:
 * data starts at 'offset' bytes into the page and runs for 'len' bytes.
 */
struct partial_page {
	unsigned int offset;	/* start of valid data within the page */
	unsigned int len;	/* number of valid bytes from 'offset' */
};

/*
 * Passed to splice_to_pipe
 */
struct splice_pipe_desc {
	struct page **pages;		/* page map */
	struct partial_page *partial;	/* pages[] may not be contig */
	int nr_pages;			/* number of pages in map */
	unsigned int flags;		/* splice flags */
	struct pipe_buf_operations *ops;/* ops associated with output pipe */
};

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 *
 * Returns 0 on success (page pruned from its mapping and left locked,
 * PIPE_BUF_FLAG_LRU set), 1 on failure (page unlocked, not stolen).
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping;

	lock_page(page);

	mapping = page_mapping(page);
	if (mapping) {
		WARN_ON(!PageUptodate(page));

		/*
		 * At least for ext2 with nobh option, we need to wait on
		 * writeback completing on this page, since we'll remove it
		 * from the pagecache.  Otherwise truncate won't wait on the
		 * page, allowing the disk blocks to be reused by someone else
		 * before we actually wrote our data to them. fs corruption
		 * ensues.
		 */
		wait_on_page_writeback(page);

		/* Drop any fs-private state (buffer heads) before unmapping */
		if (PagePrivate(page))
			try_to_release_page(page, mapping_gfp_mask(mapping));

		/*
		 * If we succeeded in removing the mapping, set LRU flag
		 * and return good.
		 */
		if (remove_mapping(mapping, page)) {
			buf->flags |= PIPE_BUF_FLAG_LRU;
			return 0;
		}
	}

	/*
	 * Raced with truncate or failed to remove page from current
	 * address space, unlock and return failure.
	 */
	unlock_page(page);
	return 1;
}

/*
 * Release a pagecache-backed pipe buffer: drop the page reference and
 * clear the LRU flag so a reused buffer slot starts clean.
 */
static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
}

/*
 * Make sure the data in this pagecache-backed buffer is valid before a
 * consumer touches it. Returns 0 if the page is uptodate, -ENODATA if
 * it was truncated away, -EIO on a read error.
 */
static int page_cache_pipe_buf_pin(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok after all, we are done.
		 */
		unlock_page(page);
	}

	return 0;
error:
	unlock_page(page);
	return err;
}

static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.pin = page_cache_pipe_buf_pin,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/*
 * Steal a user page spliced into the pipe (vmsplice). Only gifted pages
 * (SPLICE_F_GIFT) may be stolen; anything else fails with 1.
 */
static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return generic_pipe_buf_steal(pipe, buf);
}

static struct pipe_buf_operations user_page_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.pin = generic_pipe_buf_pin,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 *
 * Fills pipe buffer slots from spd->pages[]/spd->partial[], sleeping when
 * the pipe is full (unless SPLICE_F_NONBLOCK). Returns the number of bytes
 * queued, or a negative error if nothing was queued. Pages not consumed
 * (because we bailed early) are released before returning.
 */
static ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
			      struct splice_pipe_desc *spd)
{
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	/* NULL pipe->inode means an internal/anonymous pipe: no lock needed */
	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->ops = spd->ops;
			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;

			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->inode)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		/* Pipe full: give up now rather than sleep, if asked to */
		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/* Wake readers before we sleep waiting for free slots */
		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

	/* Drop references on any pages we never queued into the pipe */
	while (page_nr < spd->nr_pages)
		page_cache_release(spd->pages[page_nr++]);

	return ret;
}

/*
 * Fill the pipe with up to 'len' bytes from the pagecache of 'in' starting
 * at *ppos: look up (or allocate and read) the pages, build the partial[]
 * map, and hand the batch to splice_to_pipe(). Returns bytes spliced via
 * splice_to_pipe(), or 0/negative error if no page could be added.
 */
static int
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize;
	size_t total_len;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
	};

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * Initiate read-ahead on this page range. however, don't call into
	 * read-ahead if this is a non-zero offset (we are likely doing small
	 * chunk splice and the page is already there) for a single page.
	 */
	if (!loff || nr_pages > 1)
		page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);

	/*
	 * Now fill in the holes:
	 */
	error = 0;
	total_len = 0;

	/*
	 * Lookup the (hopefully) full range of pages we need.
	 */
	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);

	/*
	 * If find_get_pages_contig() returned fewer pages than we needed,
	 * allocate the rest.
	 */
	index += spd.nr_pages;
	while (spd.nr_pages < nr_pages) {
		/*
		 * Page could be there, find_get_pages_contig() breaks on
		 * the first hole.
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * Make sure the read-ahead engine is notified
			 * about this failure.
			 */
			handle_ra_miss(mapping, &in->f_ra, index);

			/*
			 * page didn't exist, allocate one.
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
					mapping_gfp_mask(mapping));
			if (unlikely(error)) {
				page_cache_release(page);
				/* -EEXIST: someone beat us to it; retry lookup */
				if (error == -EEXIST)
					continue;
				break;
			}
			/*
			 * add_to_page_cache() locks the page, unlock it
			 * to avoid convoluting the logic below even more.
			 */
			unlock_page(page);
		}

		pages[spd.nr_pages++] = page;
		index++;
	}

	/*
	 * Now loop over the map and see if we need to start IO on any
	 * pages, fill in the partial map, etc.
	 */
	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;
	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = pages[page_nr];

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			/*
			 * If in nonblock mode then don't block on waiting
			 * for an in-flight io page
			 */
			if (flags & SPLICE_F_NONBLOCK)
				break;

			lock_page(page);

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * added
			 */
			if (!page->mapping) {
				unlock_page(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);
			if (unlikely(error)) {
				/*
				 * We really should re-lookup the page here,
				 * but it complicates things a lot. Instead
				 * lets just do what we already stored, and
				 * we'll get it the next time we are called.
				 */
				if (error == AOP_TRUNCATED_PAGE)
					error = 0;

				break;
			}

			/*
			 * i_size must be checked after ->readpage().
			 */
			isize = i_size_read(mapping->host);
			end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
			if (unlikely(!isize || index > end_index))
				break;

			/*
			 * if this is the last page, see if we need to shrink
			 * the length and stop
			 */
			if (end_index == index) {
				loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
				if (total_len + loff > isize)
					break;
				/*
				 * force quit after adding this page
				 */
				len = this_len;
				this_len = min(this_len, loff);
				loff = 0;
			}
		}
fill_it:
		partial[page_nr].offset = loff;
		partial[page_nr].len = this_len;
		len -= this_len;
		total_len += this_len;
		loff = 0;	/* only the first page has a non-zero offset */
		spd.nr_pages++;
		index++;
	}

	/*
	 * Release any pages at the end, if we quit early. 'page_nr' is how far
	 * we got, 'nr_pages' is how many pages are in the map.
	 */
	while (page_nr < nr_pages)
		page_cache_release(pages[page_nr++]);

	if (spd.nr_pages)
		return splice_to_pipe(pipe, &spd);

	return error;
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @ppos:	position in @in to read from, updated on success
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe.
 * Loops over __generic_file_splice_read() until @len is satisfied,
 * an error occurs, or no further progress can be made. Returns the
 * total bytes spliced if any, otherwise the last status (which may
 * be 0, or -EAGAIN for SPLICE_F_NONBLOCK with nothing done).
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	ssize_t spliced;
	int ret;

	ret = 0;
	spliced = 0;

	while (len) {
		ret = __generic_file_splice_read(in, ppos, pipe, len, flags);

		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}

		*ppos += ret;
		len -= ret;
		spliced += ret;
	}

	if (spliced)
		return spliced;

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_read);

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->file;
	loff_t pos = sd->pos;
	int ret, more;

	/* make sure the buffer's data is uptodate before handing it off */
	ret = buf->ops->pin(pipe, buf);
	if (!ret) {
		/* more data follows if caller said so, or this isn't the tail */
		more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

		ret = file->f_op->sendpage(file, buf->page, buf->offset,
					   sd->len, &pos, more);
	}

	return ret;
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option that
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->file;
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	unsigned int offset, this_len;
	struct page *page;
	pgoff_t index;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	ret = buf->ops->pin(pipe, buf);
	if (unlikely(ret))
		return ret;

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	/* clamp to the end of the destination page */
	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;

	/*
	 * Reuse buf page, if SPLICE_F_MOVE is set and we are doing a full
	 * page.
	 */
	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
		/*
		 * If steal succeeds, buf->page is now pruned from the
		 * pagecache and we can reuse it. The page will also be
		 * locked on successful return.
		 */
		if (buf->ops->steal(pipe, buf))
			goto find_page;

		page = buf->page;
		if (add_to_page_cache(page, mapping, index, gfp_mask)) {
			unlock_page(page);
			goto find_page;
		}

		page_cache_get(page);

		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);
	} else {
find_page:
		page = find_lock_page(mapping, index);
		if (!page) {
			ret = -ENOMEM;
			page = page_cache_alloc_cold(mapping);
			if (unlikely(!page))
				goto out_ret;

			/*
			 * This will also lock the page
			 */
			ret = add_to_page_cache_lru(page, mapping, index,
						    gfp_mask);
			if (unlikely(ret))
				goto out;
		}

		/*
		 * We get here with the page locked. If the page is also
		 * uptodate, we don't need to do more. If it isn't, we
		 * may need to bring it in if we are not going to overwrite
		 * the full page.
		 */
		if (!PageUptodate(page)) {
			if (this_len < PAGE_CACHE_SIZE) {
				ret = mapping->a_ops->readpage(file, page);
				if (unlikely(ret))
					goto out;

				lock_page(page);

				if (!PageUptodate(page)) {
					/*
					 * Page got invalidated, repeat.
					 */
					if (!page->mapping) {
						unlock_page(page);
						page_cache_release(page);
						goto find_page;
					}
					ret = -EIO;
					goto out;
				}
			} else
				/* full-page overwrite: no need to read it in */
				SetPageUptodate(page);
		}
	}

	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
	if (unlikely(ret)) {
		loff_t isize = i_size_read(mapping->host);

		/* AOP_TRUNCATED_PAGE means ->prepare_write dropped the lock */
		if (ret != AOP_TRUNCATED_PAGE)
			unlock_page(page);
		page_cache_release(page);
		if (ret == AOP_TRUNCATED_PAGE)
			goto find_page;

		/*
		 * prepare_write() may have instantiated a few blocks
		 * outside i_size. Trim these off again.
		 */
		if (sd->pos + this_len > isize)
			vmtruncate(mapping->host, isize);

		goto out_ret;
	}

	/* If the page was stolen above, the data is already in place */
	if (buf->page != page) {
		/*
		 * Careful, ->map() uses KM_USER0!
		 */
		char *src = buf->ops->map(pipe, buf, 1);
		char *dst = kmap_atomic(page, KM_USER1);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER1);
		buf->ops->unmap(pipe, buf, src);
	}

	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
	if (!ret) {
		/*
		 * Return the number of bytes written and mark page as
		 * accessed, we are now done!
		 */
		ret = this_len;
		mark_page_accessed(page);
		balance_dirty_pages_ratelimited(mapping);
	} else if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	}
out:
	page_cache_release(page);
	unlock_page(page);
out_ret:
	return ret;
}

/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 *
 * Consumes up to 'len' bytes from 'pipe', feeding each buffer to 'actor'
 * and releasing fully-consumed buffers. Sleeps for more data unless
 * SPLICE_F_NONBLOCK. Returns bytes moved, or a negative error if nothing
 * was moved. Caller is responsible for any locking (see splice_from_pipe).
 */
static ssize_t __splice_from_pipe(struct pipe_inode_info *pipe,
				  struct file *out, loff_t *ppos, size_t len,
				  unsigned int flags, splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = *ppos;

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err <= 0) {
				/* -ENODATA (truncated page) is not an error */
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			/* actor may consume fewer bytes than sd.len */
			ret += err;
			buf->offset += err;
			buf->len -= err;

			sd.len -= err;
			sd.pos += err;
			sd.total_len -= err;
			if (sd.len)
				continue;

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			if (!sd.total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* no writer queued: return what we have so far */
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/* Wake writers before we sleep waiting for more data */
		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	return ret;
}

/*
 * Locked wrapper around __splice_from_pipe(): takes the output inode's
 * and the pipe's mutexes in a deadlock-safe order before running the
 * actor loop.
 */
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	ssize_t ret;
	struct inode *inode = out->f_mapping->host;

	/*
	 * The actor worker might be calling ->prepare_write and
	 * ->commit_write. Most of the time, these expect i_mutex to
	 * be held. Since this may result in an ABBA deadlock with
	 * pipe->inode, we have to order lock acquiry here.
	 */
	inode_double_lock(inode, pipe->inode);
	ret = __splice_from_pipe(pipe, out, ppos, len, flags, actor);
	inode_double_unlock(inode, pipe->inode);

	return ret;
}

/**
 * generic_file_splice_write_nolock - generic_file_splice_write without mutexes
 * @pipe:	pipe info
 * @out:	file to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file. The caller is responsible
 * for acquiring i_mutex on both inodes.
8376da61809SMark Fasheh * 8386da61809SMark Fasheh */ 8396da61809SMark Fasheh ssize_t 8406da61809SMark Fasheh generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out, 8416da61809SMark Fasheh loff_t *ppos, size_t len, unsigned int flags) 8426da61809SMark Fasheh { 8436da61809SMark Fasheh struct address_space *mapping = out->f_mapping; 8446da61809SMark Fasheh struct inode *inode = mapping->host; 8456da61809SMark Fasheh ssize_t ret; 8466da61809SMark Fasheh int err; 8476da61809SMark Fasheh 8488c34e2d6SJens Axboe err = remove_suid(out->f_dentry); 8498c34e2d6SJens Axboe if (unlikely(err)) 8508c34e2d6SJens Axboe return err; 8518c34e2d6SJens Axboe 8526da61809SMark Fasheh ret = __splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file); 8536da61809SMark Fasheh if (ret > 0) { 8546da61809SMark Fasheh *ppos += ret; 8556da61809SMark Fasheh 8566da61809SMark Fasheh /* 8576da61809SMark Fasheh * If file or inode is SYNC and we actually wrote some data, 8586da61809SMark Fasheh * sync it. 
8596da61809SMark Fasheh */ 8606da61809SMark Fasheh if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) { 8616da61809SMark Fasheh err = generic_osync_inode(inode, mapping, 8626da61809SMark Fasheh OSYNC_METADATA|OSYNC_DATA); 8636da61809SMark Fasheh 8646da61809SMark Fasheh if (err) 8656da61809SMark Fasheh ret = err; 8666da61809SMark Fasheh } 8676da61809SMark Fasheh } 8686da61809SMark Fasheh 8696da61809SMark Fasheh return ret; 8706da61809SMark Fasheh } 8716da61809SMark Fasheh 8726da61809SMark Fasheh EXPORT_SYMBOL(generic_file_splice_write_nolock); 8736da61809SMark Fasheh 87483f9135bSJens Axboe /** 87583f9135bSJens Axboe * generic_file_splice_write - splice data from a pipe to a file 8763a326a2cSIngo Molnar * @pipe: pipe info 87783f9135bSJens Axboe * @out: file to write to 87883f9135bSJens Axboe * @len: number of bytes to splice 87983f9135bSJens Axboe * @flags: splice modifier flags 88083f9135bSJens Axboe * 88183f9135bSJens Axboe * Will either move or copy pages (determined by @flags options) from 88283f9135bSJens Axboe * the given pipe inode to the given file. 
88383f9135bSJens Axboe * 88483f9135bSJens Axboe */ 8853a326a2cSIngo Molnar ssize_t 8863a326a2cSIngo Molnar generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, 887cbb7e577SJens Axboe loff_t *ppos, size_t len, unsigned int flags) 8885274f052SJens Axboe { 8894f6f0bd2SJens Axboe struct address_space *mapping = out->f_mapping; 8908c34e2d6SJens Axboe struct inode *inode = mapping->host; 8913a326a2cSIngo Molnar ssize_t ret; 8928c34e2d6SJens Axboe int err; 8938c34e2d6SJens Axboe 8948c34e2d6SJens Axboe err = should_remove_suid(out->f_dentry); 8958c34e2d6SJens Axboe if (unlikely(err)) { 8968c34e2d6SJens Axboe mutex_lock(&inode->i_mutex); 8978c34e2d6SJens Axboe err = __remove_suid(out->f_dentry, err); 8988c34e2d6SJens Axboe mutex_unlock(&inode->i_mutex); 8998c34e2d6SJens Axboe if (err) 9008c34e2d6SJens Axboe return err; 9018c34e2d6SJens Axboe } 9023a326a2cSIngo Molnar 90300522fb4SJens Axboe ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file); 904a4514ebdSJens Axboe if (ret > 0) { 905a4514ebdSJens Axboe *ppos += ret; 9064f6f0bd2SJens Axboe 9074f6f0bd2SJens Axboe /* 908a4514ebdSJens Axboe * If file or inode is SYNC and we actually wrote some data, 909a4514ebdSJens Axboe * sync it. 
9104f6f0bd2SJens Axboe */ 911a4514ebdSJens Axboe if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) { 9124f6f0bd2SJens Axboe mutex_lock(&inode->i_mutex); 913a4514ebdSJens Axboe err = generic_osync_inode(inode, mapping, 9144f6f0bd2SJens Axboe OSYNC_METADATA|OSYNC_DATA); 9154f6f0bd2SJens Axboe mutex_unlock(&inode->i_mutex); 9164f6f0bd2SJens Axboe 9174f6f0bd2SJens Axboe if (err) 9184f6f0bd2SJens Axboe ret = err; 9194f6f0bd2SJens Axboe } 920a4514ebdSJens Axboe } 9214f6f0bd2SJens Axboe 9224f6f0bd2SJens Axboe return ret; 9235274f052SJens Axboe } 9245274f052SJens Axboe 925059a8f37SJens Axboe EXPORT_SYMBOL(generic_file_splice_write); 926059a8f37SJens Axboe 92783f9135bSJens Axboe /** 92883f9135bSJens Axboe * generic_splice_sendpage - splice data from a pipe to a socket 92983f9135bSJens Axboe * @inode: pipe inode 93083f9135bSJens Axboe * @out: socket to write to 93183f9135bSJens Axboe * @len: number of bytes to splice 93283f9135bSJens Axboe * @flags: splice modifier flags 93383f9135bSJens Axboe * 93483f9135bSJens Axboe * Will send @len bytes from the pipe to a network socket. No data copying 93583f9135bSJens Axboe * is involved. 93683f9135bSJens Axboe * 93783f9135bSJens Axboe */ 9383a326a2cSIngo Molnar ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out, 939cbb7e577SJens Axboe loff_t *ppos, size_t len, unsigned int flags) 9405274f052SJens Axboe { 94100522fb4SJens Axboe return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage); 9425274f052SJens Axboe } 9435274f052SJens Axboe 944059a8f37SJens Axboe EXPORT_SYMBOL(generic_splice_sendpage); 945a0f06780SJeff Garzik 94683f9135bSJens Axboe /* 94783f9135bSJens Axboe * Attempt to initiate a splice from pipe to file. 
94883f9135bSJens Axboe */ 9493a326a2cSIngo Molnar static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, 950cbb7e577SJens Axboe loff_t *ppos, size_t len, unsigned int flags) 9515274f052SJens Axboe { 9525274f052SJens Axboe int ret; 9535274f052SJens Axboe 95449570e9bSJens Axboe if (unlikely(!out->f_op || !out->f_op->splice_write)) 9555274f052SJens Axboe return -EINVAL; 9565274f052SJens Axboe 95749570e9bSJens Axboe if (unlikely(!(out->f_mode & FMODE_WRITE))) 9585274f052SJens Axboe return -EBADF; 9595274f052SJens Axboe 960cbb7e577SJens Axboe ret = rw_verify_area(WRITE, out, ppos, len); 9615274f052SJens Axboe if (unlikely(ret < 0)) 9625274f052SJens Axboe return ret; 9635274f052SJens Axboe 964cbb7e577SJens Axboe return out->f_op->splice_write(pipe, out, ppos, len, flags); 9655274f052SJens Axboe } 9665274f052SJens Axboe 96783f9135bSJens Axboe /* 96883f9135bSJens Axboe * Attempt to initiate a splice from a file to a pipe. 96983f9135bSJens Axboe */ 970cbb7e577SJens Axboe static long do_splice_to(struct file *in, loff_t *ppos, 971cbb7e577SJens Axboe struct pipe_inode_info *pipe, size_t len, 972cbb7e577SJens Axboe unsigned int flags) 9735274f052SJens Axboe { 974cbb7e577SJens Axboe loff_t isize, left; 9755274f052SJens Axboe int ret; 9765274f052SJens Axboe 97749570e9bSJens Axboe if (unlikely(!in->f_op || !in->f_op->splice_read)) 9785274f052SJens Axboe return -EINVAL; 9795274f052SJens Axboe 98049570e9bSJens Axboe if (unlikely(!(in->f_mode & FMODE_READ))) 9815274f052SJens Axboe return -EBADF; 9825274f052SJens Axboe 983cbb7e577SJens Axboe ret = rw_verify_area(READ, in, ppos, len); 9845274f052SJens Axboe if (unlikely(ret < 0)) 9855274f052SJens Axboe return ret; 9865274f052SJens Axboe 9875274f052SJens Axboe isize = i_size_read(in->f_mapping->host); 988cbb7e577SJens Axboe if (unlikely(*ppos >= isize)) 9895274f052SJens Axboe return 0; 9905274f052SJens Axboe 991cbb7e577SJens Axboe left = isize - *ppos; 99249570e9bSJens Axboe if (unlikely(left < len)) 
9935274f052SJens Axboe len = left; 9945274f052SJens Axboe 995cbb7e577SJens Axboe return in->f_op->splice_read(in, ppos, pipe, len, flags); 9965274f052SJens Axboe } 9975274f052SJens Axboe 998cbb7e577SJens Axboe long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, 999cbb7e577SJens Axboe size_t len, unsigned int flags) 1000b92ce558SJens Axboe { 1001b92ce558SJens Axboe struct pipe_inode_info *pipe; 1002b92ce558SJens Axboe long ret, bytes; 1003cbb7e577SJens Axboe loff_t out_off; 1004b92ce558SJens Axboe umode_t i_mode; 1005b92ce558SJens Axboe int i; 1006b92ce558SJens Axboe 1007b92ce558SJens Axboe /* 1008b92ce558SJens Axboe * We require the input being a regular file, as we don't want to 1009b92ce558SJens Axboe * randomly drop data for eg socket -> socket splicing. Use the 1010b92ce558SJens Axboe * piped splicing for that! 1011b92ce558SJens Axboe */ 1012b92ce558SJens Axboe i_mode = in->f_dentry->d_inode->i_mode; 1013b92ce558SJens Axboe if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode))) 1014b92ce558SJens Axboe return -EINVAL; 1015b92ce558SJens Axboe 1016b92ce558SJens Axboe /* 1017b92ce558SJens Axboe * neither in nor out is a pipe, setup an internal pipe attached to 1018b92ce558SJens Axboe * 'out' and transfer the wanted data from 'in' to 'out' through that 1019b92ce558SJens Axboe */ 1020b92ce558SJens Axboe pipe = current->splice_pipe; 102149570e9bSJens Axboe if (unlikely(!pipe)) { 1022b92ce558SJens Axboe pipe = alloc_pipe_info(NULL); 1023b92ce558SJens Axboe if (!pipe) 1024b92ce558SJens Axboe return -ENOMEM; 1025b92ce558SJens Axboe 1026b92ce558SJens Axboe /* 1027b92ce558SJens Axboe * We don't have an immediate reader, but we'll read the stuff 102800522fb4SJens Axboe * out of the pipe right after the splice_to_pipe(). So set 1029b92ce558SJens Axboe * PIPE_READERS appropriately. 
1030b92ce558SJens Axboe */ 1031b92ce558SJens Axboe pipe->readers = 1; 1032b92ce558SJens Axboe 1033b92ce558SJens Axboe current->splice_pipe = pipe; 1034b92ce558SJens Axboe } 1035b92ce558SJens Axboe 1036b92ce558SJens Axboe /* 103773d62d83SIngo Molnar * Do the splice. 1038b92ce558SJens Axboe */ 1039b92ce558SJens Axboe ret = 0; 1040b92ce558SJens Axboe bytes = 0; 1041cbb7e577SJens Axboe out_off = 0; 1042b92ce558SJens Axboe 1043b92ce558SJens Axboe while (len) { 1044b92ce558SJens Axboe size_t read_len, max_read_len; 1045b92ce558SJens Axboe 1046b92ce558SJens Axboe /* 1047b92ce558SJens Axboe * Do at most PIPE_BUFFERS pages worth of transfer: 1048b92ce558SJens Axboe */ 1049b92ce558SJens Axboe max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE)); 1050b92ce558SJens Axboe 1051cbb7e577SJens Axboe ret = do_splice_to(in, ppos, pipe, max_read_len, flags); 1052b92ce558SJens Axboe if (unlikely(ret < 0)) 1053b92ce558SJens Axboe goto out_release; 1054b92ce558SJens Axboe 1055b92ce558SJens Axboe read_len = ret; 1056b92ce558SJens Axboe 1057b92ce558SJens Axboe /* 1058b92ce558SJens Axboe * NOTE: nonblocking mode only applies to the input. We 1059b92ce558SJens Axboe * must not do the output in nonblocking mode as then we 1060b92ce558SJens Axboe * could get stuck data in the internal pipe: 1061b92ce558SJens Axboe */ 1062cbb7e577SJens Axboe ret = do_splice_from(pipe, out, &out_off, read_len, 1063b92ce558SJens Axboe flags & ~SPLICE_F_NONBLOCK); 1064b92ce558SJens Axboe if (unlikely(ret < 0)) 1065b92ce558SJens Axboe goto out_release; 1066b92ce558SJens Axboe 1067b92ce558SJens Axboe bytes += ret; 1068b92ce558SJens Axboe len -= ret; 1069b92ce558SJens Axboe 1070b92ce558SJens Axboe /* 1071b92ce558SJens Axboe * In nonblocking mode, if we got back a short read then 1072b92ce558SJens Axboe * that was due to either an IO error or due to the 1073b92ce558SJens Axboe * pagecache entry not being there. 
In the IO error case 1074b92ce558SJens Axboe * the _next_ splice attempt will produce a clean IO error 1075b92ce558SJens Axboe * return value (not a short read), so in both cases it's 1076b92ce558SJens Axboe * correct to break out of the loop here: 1077b92ce558SJens Axboe */ 1078b92ce558SJens Axboe if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len)) 1079b92ce558SJens Axboe break; 1080b92ce558SJens Axboe } 1081b92ce558SJens Axboe 1082b92ce558SJens Axboe pipe->nrbufs = pipe->curbuf = 0; 1083b92ce558SJens Axboe 1084b92ce558SJens Axboe return bytes; 1085b92ce558SJens Axboe 1086b92ce558SJens Axboe out_release: 1087b92ce558SJens Axboe /* 1088b92ce558SJens Axboe * If we did an incomplete transfer we must release 1089b92ce558SJens Axboe * the pipe buffers in question: 1090b92ce558SJens Axboe */ 1091b92ce558SJens Axboe for (i = 0; i < PIPE_BUFFERS; i++) { 1092b92ce558SJens Axboe struct pipe_buffer *buf = pipe->bufs + i; 1093b92ce558SJens Axboe 1094b92ce558SJens Axboe if (buf->ops) { 1095b92ce558SJens Axboe buf->ops->release(pipe, buf); 1096b92ce558SJens Axboe buf->ops = NULL; 1097b92ce558SJens Axboe } 1098b92ce558SJens Axboe } 1099b92ce558SJens Axboe pipe->nrbufs = pipe->curbuf = 0; 1100b92ce558SJens Axboe 1101b92ce558SJens Axboe /* 1102b92ce558SJens Axboe * If we transferred some data, return the number of bytes: 1103b92ce558SJens Axboe */ 1104b92ce558SJens Axboe if (bytes > 0) 1105b92ce558SJens Axboe return bytes; 1106b92ce558SJens Axboe 1107b92ce558SJens Axboe return ret; 1108b92ce558SJens Axboe } 1109b92ce558SJens Axboe 1110b92ce558SJens Axboe EXPORT_SYMBOL(do_splice_direct); 1111b92ce558SJens Axboe 111283f9135bSJens Axboe /* 111383f9135bSJens Axboe * Determine where to splice to/from. 
111483f9135bSJens Axboe */ 1115529565dcSIngo Molnar static long do_splice(struct file *in, loff_t __user *off_in, 1116529565dcSIngo Molnar struct file *out, loff_t __user *off_out, 1117529565dcSIngo Molnar size_t len, unsigned int flags) 11185274f052SJens Axboe { 11193a326a2cSIngo Molnar struct pipe_inode_info *pipe; 1120cbb7e577SJens Axboe loff_t offset, *off; 1121a4514ebdSJens Axboe long ret; 11225274f052SJens Axboe 11233a326a2cSIngo Molnar pipe = in->f_dentry->d_inode->i_pipe; 1124529565dcSIngo Molnar if (pipe) { 1125529565dcSIngo Molnar if (off_in) 1126529565dcSIngo Molnar return -ESPIPE; 1127b92ce558SJens Axboe if (off_out) { 1128b92ce558SJens Axboe if (out->f_op->llseek == no_llseek) 1129b92ce558SJens Axboe return -EINVAL; 1130cbb7e577SJens Axboe if (copy_from_user(&offset, off_out, sizeof(loff_t))) 1131b92ce558SJens Axboe return -EFAULT; 1132cbb7e577SJens Axboe off = &offset; 1133cbb7e577SJens Axboe } else 1134cbb7e577SJens Axboe off = &out->f_pos; 1135529565dcSIngo Molnar 1136a4514ebdSJens Axboe ret = do_splice_from(pipe, out, off, len, flags); 1137a4514ebdSJens Axboe 1138a4514ebdSJens Axboe if (off_out && copy_to_user(off_out, off, sizeof(loff_t))) 1139a4514ebdSJens Axboe ret = -EFAULT; 1140a4514ebdSJens Axboe 1141a4514ebdSJens Axboe return ret; 1142529565dcSIngo Molnar } 11435274f052SJens Axboe 11443a326a2cSIngo Molnar pipe = out->f_dentry->d_inode->i_pipe; 1145529565dcSIngo Molnar if (pipe) { 1146529565dcSIngo Molnar if (off_out) 1147529565dcSIngo Molnar return -ESPIPE; 1148b92ce558SJens Axboe if (off_in) { 1149b92ce558SJens Axboe if (in->f_op->llseek == no_llseek) 1150b92ce558SJens Axboe return -EINVAL; 1151cbb7e577SJens Axboe if (copy_from_user(&offset, off_in, sizeof(loff_t))) 1152b92ce558SJens Axboe return -EFAULT; 1153cbb7e577SJens Axboe off = &offset; 1154cbb7e577SJens Axboe } else 1155cbb7e577SJens Axboe off = &in->f_pos; 1156529565dcSIngo Molnar 1157a4514ebdSJens Axboe ret = do_splice_to(in, off, pipe, len, flags); 1158a4514ebdSJens Axboe 
1159a4514ebdSJens Axboe if (off_in && copy_to_user(off_in, off, sizeof(loff_t))) 1160a4514ebdSJens Axboe ret = -EFAULT; 1161a4514ebdSJens Axboe 1162a4514ebdSJens Axboe return ret; 1163529565dcSIngo Molnar } 11645274f052SJens Axboe 11655274f052SJens Axboe return -EINVAL; 11665274f052SJens Axboe } 11675274f052SJens Axboe 1168912d35f8SJens Axboe /* 1169912d35f8SJens Axboe * Map an iov into an array of pages and offset/length tupples. With the 1170912d35f8SJens Axboe * partial_page structure, we can map several non-contiguous ranges into 1171912d35f8SJens Axboe * our ones pages[] map instead of splitting that operation into pieces. 1172912d35f8SJens Axboe * Could easily be exported as a generic helper for other users, in which 1173912d35f8SJens Axboe * case one would probably want to add a 'max_nr_pages' parameter as well. 1174912d35f8SJens Axboe */ 1175912d35f8SJens Axboe static int get_iovec_page_array(const struct iovec __user *iov, 1176912d35f8SJens Axboe unsigned int nr_vecs, struct page **pages, 11777afa6fd0SJens Axboe struct partial_page *partial, int aligned) 1178912d35f8SJens Axboe { 1179912d35f8SJens Axboe int buffers = 0, error = 0; 1180912d35f8SJens Axboe 1181912d35f8SJens Axboe /* 1182912d35f8SJens Axboe * It's ok to take the mmap_sem for reading, even 1183912d35f8SJens Axboe * across a "get_user()". 1184912d35f8SJens Axboe */ 1185912d35f8SJens Axboe down_read(¤t->mm->mmap_sem); 1186912d35f8SJens Axboe 1187912d35f8SJens Axboe while (nr_vecs) { 1188912d35f8SJens Axboe unsigned long off, npages; 1189912d35f8SJens Axboe void __user *base; 1190912d35f8SJens Axboe size_t len; 1191912d35f8SJens Axboe int i; 1192912d35f8SJens Axboe 1193912d35f8SJens Axboe /* 1194912d35f8SJens Axboe * Get user address base and length for this iovec. 
1195912d35f8SJens Axboe */ 1196912d35f8SJens Axboe error = get_user(base, &iov->iov_base); 1197912d35f8SJens Axboe if (unlikely(error)) 1198912d35f8SJens Axboe break; 1199912d35f8SJens Axboe error = get_user(len, &iov->iov_len); 1200912d35f8SJens Axboe if (unlikely(error)) 1201912d35f8SJens Axboe break; 1202912d35f8SJens Axboe 1203912d35f8SJens Axboe /* 1204912d35f8SJens Axboe * Sanity check this iovec. 0 read succeeds. 1205912d35f8SJens Axboe */ 1206912d35f8SJens Axboe if (unlikely(!len)) 1207912d35f8SJens Axboe break; 1208912d35f8SJens Axboe error = -EFAULT; 1209912d35f8SJens Axboe if (unlikely(!base)) 1210912d35f8SJens Axboe break; 1211912d35f8SJens Axboe 1212912d35f8SJens Axboe /* 1213912d35f8SJens Axboe * Get this base offset and number of pages, then map 1214912d35f8SJens Axboe * in the user pages. 1215912d35f8SJens Axboe */ 1216912d35f8SJens Axboe off = (unsigned long) base & ~PAGE_MASK; 12177afa6fd0SJens Axboe 12187afa6fd0SJens Axboe /* 12197afa6fd0SJens Axboe * If asked for alignment, the offset must be zero and the 12207afa6fd0SJens Axboe * length a multiple of the PAGE_SIZE. 12217afa6fd0SJens Axboe */ 12227afa6fd0SJens Axboe error = -EINVAL; 12237afa6fd0SJens Axboe if (aligned && (off || len & ~PAGE_MASK)) 12247afa6fd0SJens Axboe break; 12257afa6fd0SJens Axboe 1226912d35f8SJens Axboe npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 1227912d35f8SJens Axboe if (npages > PIPE_BUFFERS - buffers) 1228912d35f8SJens Axboe npages = PIPE_BUFFERS - buffers; 1229912d35f8SJens Axboe 1230912d35f8SJens Axboe error = get_user_pages(current, current->mm, 1231912d35f8SJens Axboe (unsigned long) base, npages, 0, 0, 1232912d35f8SJens Axboe &pages[buffers], NULL); 1233912d35f8SJens Axboe 1234912d35f8SJens Axboe if (unlikely(error <= 0)) 1235912d35f8SJens Axboe break; 1236912d35f8SJens Axboe 1237912d35f8SJens Axboe /* 1238912d35f8SJens Axboe * Fill this contiguous range into the partial page map. 
1239912d35f8SJens Axboe */ 1240912d35f8SJens Axboe for (i = 0; i < error; i++) { 12417591489aSJens Axboe const int plen = min_t(size_t, len, PAGE_SIZE - off); 1242912d35f8SJens Axboe 1243912d35f8SJens Axboe partial[buffers].offset = off; 1244912d35f8SJens Axboe partial[buffers].len = plen; 1245912d35f8SJens Axboe 1246912d35f8SJens Axboe off = 0; 1247912d35f8SJens Axboe len -= plen; 1248912d35f8SJens Axboe buffers++; 1249912d35f8SJens Axboe } 1250912d35f8SJens Axboe 1251912d35f8SJens Axboe /* 1252912d35f8SJens Axboe * We didn't complete this iov, stop here since it probably 1253912d35f8SJens Axboe * means we have to move some of this into a pipe to 1254912d35f8SJens Axboe * be able to continue. 1255912d35f8SJens Axboe */ 1256912d35f8SJens Axboe if (len) 1257912d35f8SJens Axboe break; 1258912d35f8SJens Axboe 1259912d35f8SJens Axboe /* 1260912d35f8SJens Axboe * Don't continue if we mapped fewer pages than we asked for, 1261912d35f8SJens Axboe * or if we mapped the max number of pages that we have 1262912d35f8SJens Axboe * room for. 1263912d35f8SJens Axboe */ 1264912d35f8SJens Axboe if (error < npages || buffers == PIPE_BUFFERS) 1265912d35f8SJens Axboe break; 1266912d35f8SJens Axboe 1267912d35f8SJens Axboe nr_vecs--; 1268912d35f8SJens Axboe iov++; 1269912d35f8SJens Axboe } 1270912d35f8SJens Axboe 1271912d35f8SJens Axboe up_read(¤t->mm->mmap_sem); 1272912d35f8SJens Axboe 1273912d35f8SJens Axboe if (buffers) 1274912d35f8SJens Axboe return buffers; 1275912d35f8SJens Axboe 1276912d35f8SJens Axboe return error; 1277912d35f8SJens Axboe } 1278912d35f8SJens Axboe 1279912d35f8SJens Axboe /* 1280912d35f8SJens Axboe * vmsplice splices a user address range into a pipe. It can be thought of 1281912d35f8SJens Axboe * as splice-from-memory, where the regular splice is splice-from-file (or 1282912d35f8SJens Axboe * to file). In both cases the output is a pipe, naturally. 
1283912d35f8SJens Axboe * 1284912d35f8SJens Axboe * Note that vmsplice only supports splicing _from_ user memory to a pipe, 1285912d35f8SJens Axboe * not the other way around. Splicing from user memory is a simple operation 1286912d35f8SJens Axboe * that can be supported without any funky alignment restrictions or nasty 1287912d35f8SJens Axboe * vm tricks. We simply map in the user memory and fill them into a pipe. 1288912d35f8SJens Axboe * The reverse isn't quite as easy, though. There are two possible solutions 1289912d35f8SJens Axboe * for that: 1290912d35f8SJens Axboe * 1291912d35f8SJens Axboe * - memcpy() the data internally, at which point we might as well just 1292912d35f8SJens Axboe * do a regular read() on the buffer anyway. 1293912d35f8SJens Axboe * - Lots of nasty vm tricks, that are neither fast nor flexible (it 1294912d35f8SJens Axboe * has restriction limitations on both ends of the pipe). 1295912d35f8SJens Axboe * 1296912d35f8SJens Axboe * Alas, it isn't here. 1297912d35f8SJens Axboe * 1298912d35f8SJens Axboe */ 1299912d35f8SJens Axboe static long do_vmsplice(struct file *file, const struct iovec __user *iov, 1300912d35f8SJens Axboe unsigned long nr_segs, unsigned int flags) 1301912d35f8SJens Axboe { 1302912d35f8SJens Axboe struct pipe_inode_info *pipe = file->f_dentry->d_inode->i_pipe; 1303912d35f8SJens Axboe struct page *pages[PIPE_BUFFERS]; 1304912d35f8SJens Axboe struct partial_page partial[PIPE_BUFFERS]; 1305912d35f8SJens Axboe struct splice_pipe_desc spd = { 1306912d35f8SJens Axboe .pages = pages, 1307912d35f8SJens Axboe .partial = partial, 1308912d35f8SJens Axboe .flags = flags, 1309912d35f8SJens Axboe .ops = &user_page_pipe_buf_ops, 1310912d35f8SJens Axboe }; 1311912d35f8SJens Axboe 1312912d35f8SJens Axboe if (unlikely(!pipe)) 1313912d35f8SJens Axboe return -EBADF; 1314912d35f8SJens Axboe if (unlikely(nr_segs > UIO_MAXIOV)) 1315912d35f8SJens Axboe return -EINVAL; 1316912d35f8SJens Axboe else if (unlikely(!nr_segs)) 1317912d35f8SJens Axboe 
return 0; 1318912d35f8SJens Axboe 13197afa6fd0SJens Axboe spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial, 13207afa6fd0SJens Axboe flags & SPLICE_F_GIFT); 1321912d35f8SJens Axboe if (spd.nr_pages <= 0) 1322912d35f8SJens Axboe return spd.nr_pages; 1323912d35f8SJens Axboe 132400522fb4SJens Axboe return splice_to_pipe(pipe, &spd); 1325912d35f8SJens Axboe } 1326912d35f8SJens Axboe 1327912d35f8SJens Axboe asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov, 1328912d35f8SJens Axboe unsigned long nr_segs, unsigned int flags) 1329912d35f8SJens Axboe { 1330912d35f8SJens Axboe struct file *file; 1331912d35f8SJens Axboe long error; 1332912d35f8SJens Axboe int fput; 1333912d35f8SJens Axboe 1334912d35f8SJens Axboe error = -EBADF; 1335912d35f8SJens Axboe file = fget_light(fd, &fput); 1336912d35f8SJens Axboe if (file) { 1337912d35f8SJens Axboe if (file->f_mode & FMODE_WRITE) 1338912d35f8SJens Axboe error = do_vmsplice(file, iov, nr_segs, flags); 1339912d35f8SJens Axboe 1340912d35f8SJens Axboe fput_light(file, fput); 1341912d35f8SJens Axboe } 1342912d35f8SJens Axboe 1343912d35f8SJens Axboe return error; 1344912d35f8SJens Axboe } 1345912d35f8SJens Axboe 1346529565dcSIngo Molnar asmlinkage long sys_splice(int fd_in, loff_t __user *off_in, 1347529565dcSIngo Molnar int fd_out, loff_t __user *off_out, 1348529565dcSIngo Molnar size_t len, unsigned int flags) 13495274f052SJens Axboe { 13505274f052SJens Axboe long error; 13515274f052SJens Axboe struct file *in, *out; 13525274f052SJens Axboe int fput_in, fput_out; 13535274f052SJens Axboe 13545274f052SJens Axboe if (unlikely(!len)) 13555274f052SJens Axboe return 0; 13565274f052SJens Axboe 13575274f052SJens Axboe error = -EBADF; 1358529565dcSIngo Molnar in = fget_light(fd_in, &fput_in); 13595274f052SJens Axboe if (in) { 13605274f052SJens Axboe if (in->f_mode & FMODE_READ) { 1361529565dcSIngo Molnar out = fget_light(fd_out, &fput_out); 13625274f052SJens Axboe if (out) { 13635274f052SJens Axboe if (out->f_mode 
& FMODE_WRITE) 1364529565dcSIngo Molnar error = do_splice(in, off_in, 1365529565dcSIngo Molnar out, off_out, 1366529565dcSIngo Molnar len, flags); 13675274f052SJens Axboe fput_light(out, fput_out); 13685274f052SJens Axboe } 13695274f052SJens Axboe } 13705274f052SJens Axboe 13715274f052SJens Axboe fput_light(in, fput_in); 13725274f052SJens Axboe } 13735274f052SJens Axboe 13745274f052SJens Axboe return error; 13755274f052SJens Axboe } 137670524490SJens Axboe 137770524490SJens Axboe /* 1378aadd06e5SJens Axboe * Make sure there's data to read. Wait for input if we can, otherwise 1379aadd06e5SJens Axboe * return an appropriate error. 1380aadd06e5SJens Axboe */ 1381aadd06e5SJens Axboe static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) 1382aadd06e5SJens Axboe { 1383aadd06e5SJens Axboe int ret; 1384aadd06e5SJens Axboe 1385aadd06e5SJens Axboe /* 1386aadd06e5SJens Axboe * Check ->nrbufs without the inode lock first. This function 1387aadd06e5SJens Axboe * is speculative anyways, so missing one is ok. 
 */
	/* Data already buffered: nothing to wait for. */
	if (pipe->nrbufs)
		return 0;

	ret = 0;
	mutex_lock(&pipe->inode->i_mutex);

	/*
	 * Re-check under the pipe inode mutex, and sleep until data shows
	 * up, the writer side goes away, or we are interrupted.
	 */
	while (!pipe->nrbufs) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* No writers left: treat as EOF, ret stays 0. */
		if (!pipe->writers)
			break;
		/*
		 * Only fail with -EAGAIN when no writer is currently
		 * blocked waiting for room; if one is, data is presumably
		 * imminent, so even a SPLICE_F_NONBLOCK caller falls
		 * through to pipe_wait() below.
		 */
		if (!pipe->waiting_writers) {
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		pipe_wait(pipe);
	}

	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}

/*
 * Make sure there's writeable room. Wait for room if we can, otherwise
 * return an appropriate error.
 *
 * Returns 0 when at least one buffer slot is free in the pipe, -EPIPE
 * if there are no readers, -EAGAIN for a full pipe with
 * SPLICE_F_NONBLOCK set, or -ERESTARTSYS if a signal is pending.
 */
static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyways, so missing one is ok.
	 */
	if (pipe->nrbufs < PIPE_BUFFERS)
		return 0;

	ret = 0;
	mutex_lock(&pipe->inode->i_mutex);

	/* Re-check under the lock; sleep until a slot frees up. */
	while (pipe->nrbufs >= PIPE_BUFFERS) {
		/* No readers: nobody will ever drain the pipe. */
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			ret = -EPIPE;
			break;
		}
		if (flags & SPLICE_F_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/*
		 * Advertise ourselves as a waiting writer (readers use
		 * this to decide whether to wake us), then sleep until
		 * the pipe state changes.
		 */
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}

/*
 * Link contents of ipipe to opipe.
 *
 * Grabs references to up to 'len' bytes worth of ipipe's buffers and
 * makes opipe's free slots point at the same pages - no data is copied.
 * Returns the number of bytes linked, or a negative error if nothing
 * could be linked at all. May link less than 'len' if the input runs
 * dry or the output fills up.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, i = 0, nbuf;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	inode_double_lock(ipipe->inode, opipe->inode);

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			/* Only report EPIPE if nothing was linked yet. */
			if (!ret)
				ret = -EPIPE;
			break;
		}

		/*
		 * If we have iterated all input buffers or ran out of
		 * output room, break.
		 */
		if (i >= ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS)
			break;

		/* i'th input buffer and first free output slot (ring math). */
		ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));
		nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

		/*
		 * Get a reference to this pipe buffer,
		 * so we can copy the contents over.
		 */
		ibuf->ops->get(ipipe, ibuf);

		obuf = opipe->bufs + nbuf;
		*obuf = *ibuf;

		/*
		 * Don't inherit the gift flag, we need to
		 * prevent multiple steals of this page.
		 */
		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

		/* Trim the final buffer so we never link more than 'len'. */
		if (obuf->len > len)
			obuf->len = len;

		opipe->nrbufs++;
		ret += obuf->len;
		len -= obuf->len;
		i++;
	} while (len);

	inode_double_unlock(ipipe->inode, opipe->inode);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
	if (ret > 0) {
		smp_mb();
		if (waitqueue_active(&opipe->wait))
			wake_up_interruptible(&opipe->wait);
		kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
	}

	return ret;
}

/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
153570524490SJens Axboe */ 153670524490SJens Axboe static long do_tee(struct file *in, struct file *out, size_t len, 153770524490SJens Axboe unsigned int flags) 153870524490SJens Axboe { 153970524490SJens Axboe struct pipe_inode_info *ipipe = in->f_dentry->d_inode->i_pipe; 154070524490SJens Axboe struct pipe_inode_info *opipe = out->f_dentry->d_inode->i_pipe; 1541aadd06e5SJens Axboe int ret = -EINVAL; 154270524490SJens Axboe 154370524490SJens Axboe /* 1544aadd06e5SJens Axboe * Duplicate the contents of ipipe to opipe without actually 1545aadd06e5SJens Axboe * copying the data. 154670524490SJens Axboe */ 1547aadd06e5SJens Axboe if (ipipe && opipe && ipipe != opipe) { 1548aadd06e5SJens Axboe /* 1549aadd06e5SJens Axboe * Keep going, unless we encounter an error. The ipipe/opipe 1550aadd06e5SJens Axboe * ordering doesn't really matter. 1551aadd06e5SJens Axboe */ 1552aadd06e5SJens Axboe ret = link_ipipe_prep(ipipe, flags); 1553aadd06e5SJens Axboe if (!ret) { 1554aadd06e5SJens Axboe ret = link_opipe_prep(opipe, flags); 1555aadd06e5SJens Axboe if (!ret) { 1556aadd06e5SJens Axboe ret = link_pipe(ipipe, opipe, len, flags); 1557aadd06e5SJens Axboe if (!ret && (flags & SPLICE_F_NONBLOCK)) 1558aadd06e5SJens Axboe ret = -EAGAIN; 1559aadd06e5SJens Axboe } 1560aadd06e5SJens Axboe } 1561aadd06e5SJens Axboe } 156270524490SJens Axboe 1563aadd06e5SJens Axboe return ret; 156470524490SJens Axboe } 156570524490SJens Axboe 156670524490SJens Axboe asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags) 156770524490SJens Axboe { 156870524490SJens Axboe struct file *in; 156970524490SJens Axboe int error, fput_in; 157070524490SJens Axboe 157170524490SJens Axboe if (unlikely(!len)) 157270524490SJens Axboe return 0; 157370524490SJens Axboe 157470524490SJens Axboe error = -EBADF; 157570524490SJens Axboe in = fget_light(fdin, &fput_in); 157670524490SJens Axboe if (in) { 157770524490SJens Axboe if (in->f_mode & FMODE_READ) { 157870524490SJens Axboe int fput_out; 
157970524490SJens Axboe struct file *out = fget_light(fdout, &fput_out); 158070524490SJens Axboe 158170524490SJens Axboe if (out) { 158270524490SJens Axboe if (out->f_mode & FMODE_WRITE) 158370524490SJens Axboe error = do_tee(in, out, len, flags); 158470524490SJens Axboe fput_light(out, fput_out); 158570524490SJens Axboe } 158670524490SJens Axboe } 158770524490SJens Axboe fput_light(in, fput_in); 158870524490SJens Axboe } 158970524490SJens Axboe 159070524490SJens Axboe return error; 159170524490SJens Axboe } 1592