/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files, network, direct splicing, etc and
 * fixing lots of bugs.
 *
 * Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
 *
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>

/*
 * A (offset, len) window into one page of a splice_pipe_desc page map.
 */
struct partial_page {
	unsigned int offset;
	unsigned int len;
};

/*
 * Passed to splice_to_pipe
 */
struct splice_pipe_desc {
	struct page **pages;		/* page map */
	struct partial_page *partial;	/* pages[] may not be contig */
	int nr_pages;			/* number of pages in map */
	unsigned int flags;		/* splice flags */
	struct pipe_buf_operations *ops;/* ops associated with output pipe */
};

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 *
 * Returns 0 on success (page pruned from the pagecache, left locked,
 * PIPE_BUF_FLAG_LRU set) and 1 if the page could not be stolen.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	lock_page(page);

	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate wont wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	/*
	 * Buffers/private data must be dropped before the page can be
	 * taken out of the pagecache. A failed try_to_release_page() will
	 * make remove_mapping() below fail, so the result is not checked
	 * separately here.
	 */
	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page)) {
		unlock_page(page);
		return 1;
	}

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return 0;
}

/*
 * Drop the pipe buffer's reference to its page. LRU state is cleared so
 * a later reuse of the buffer starts from a clean slate.
 */
static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
}

/*
 * Make sure the contents of this pagecache-backed buffer are uptodate
 * before the consumer touches them. Returns 0 if OK, -ENODATA if the
 * page was truncated away, or -EIO on a read error.
 */
static int page_cache_pipe_buf_pin(struct pipe_inode_info *info,
				   struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		/* lock to serialize against in-flight read IO / truncate */
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok afterall, we are done.
		 */
		unlock_page(page);
	}

	return 0;
error:
	unlock_page(page);
	return err;
}

/*
 * Buffer ops for pipe buffers whose pages live in the pagecache.
 */
static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.pin = page_cache_pipe_buf_pin,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/*
 * Steal for user pages (vmsplice): only gifted pages may be stolen.
 */
static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return generic_pipe_buf_steal(pipe, buf);
}

/*
 * Buffer ops for pipe buffers backed by user memory (vmsplice).
 */
static struct pipe_buf_operations user_page_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.pin = generic_pipe_buf_pin,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
16383f9135bSJens Axboe */ 16400522fb4SJens Axboe static ssize_t splice_to_pipe(struct pipe_inode_info *pipe, 165912d35f8SJens Axboe struct splice_pipe_desc *spd) 1665274f052SJens Axboe { 167912d35f8SJens Axboe int ret, do_wakeup, page_nr; 1685274f052SJens Axboe 1695274f052SJens Axboe ret = 0; 1705274f052SJens Axboe do_wakeup = 0; 171912d35f8SJens Axboe page_nr = 0; 1725274f052SJens Axboe 1733a326a2cSIngo Molnar if (pipe->inode) 1743a326a2cSIngo Molnar mutex_lock(&pipe->inode->i_mutex); 1755274f052SJens Axboe 1765274f052SJens Axboe for (;;) { 1773a326a2cSIngo Molnar if (!pipe->readers) { 1785274f052SJens Axboe send_sig(SIGPIPE, current, 0); 1795274f052SJens Axboe if (!ret) 1805274f052SJens Axboe ret = -EPIPE; 1815274f052SJens Axboe break; 1825274f052SJens Axboe } 1835274f052SJens Axboe 1846f767b04SJens Axboe if (pipe->nrbufs < PIPE_BUFFERS) { 1856f767b04SJens Axboe int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1); 1863a326a2cSIngo Molnar struct pipe_buffer *buf = pipe->bufs + newbuf; 1875274f052SJens Axboe 188912d35f8SJens Axboe buf->page = spd->pages[page_nr]; 189912d35f8SJens Axboe buf->offset = spd->partial[page_nr].offset; 190912d35f8SJens Axboe buf->len = spd->partial[page_nr].len; 191912d35f8SJens Axboe buf->ops = spd->ops; 1927afa6fd0SJens Axboe if (spd->flags & SPLICE_F_GIFT) 1937afa6fd0SJens Axboe buf->flags |= PIPE_BUF_FLAG_GIFT; 1947afa6fd0SJens Axboe 1956f767b04SJens Axboe pipe->nrbufs++; 196912d35f8SJens Axboe page_nr++; 197912d35f8SJens Axboe ret += buf->len; 198912d35f8SJens Axboe 1996f767b04SJens Axboe if (pipe->inode) 2005274f052SJens Axboe do_wakeup = 1; 2015274f052SJens Axboe 202912d35f8SJens Axboe if (!--spd->nr_pages) 2035274f052SJens Axboe break; 2046f767b04SJens Axboe if (pipe->nrbufs < PIPE_BUFFERS) 2055274f052SJens Axboe continue; 2065274f052SJens Axboe 2075274f052SJens Axboe break; 2085274f052SJens Axboe } 2095274f052SJens Axboe 210912d35f8SJens Axboe if (spd->flags & SPLICE_F_NONBLOCK) { 21129e35094SLinus Torvalds if (!ret) 
21229e35094SLinus Torvalds ret = -EAGAIN; 21329e35094SLinus Torvalds break; 21429e35094SLinus Torvalds } 21529e35094SLinus Torvalds 2165274f052SJens Axboe if (signal_pending(current)) { 2175274f052SJens Axboe if (!ret) 2185274f052SJens Axboe ret = -ERESTARTSYS; 2195274f052SJens Axboe break; 2205274f052SJens Axboe } 2215274f052SJens Axboe 2225274f052SJens Axboe if (do_wakeup) { 223c0bd1f65SJens Axboe smp_mb(); 2243a326a2cSIngo Molnar if (waitqueue_active(&pipe->wait)) 2253a326a2cSIngo Molnar wake_up_interruptible_sync(&pipe->wait); 2263a326a2cSIngo Molnar kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); 2275274f052SJens Axboe do_wakeup = 0; 2285274f052SJens Axboe } 2295274f052SJens Axboe 2303a326a2cSIngo Molnar pipe->waiting_writers++; 2313a326a2cSIngo Molnar pipe_wait(pipe); 2323a326a2cSIngo Molnar pipe->waiting_writers--; 2335274f052SJens Axboe } 2345274f052SJens Axboe 2353a326a2cSIngo Molnar if (pipe->inode) 2363a326a2cSIngo Molnar mutex_unlock(&pipe->inode->i_mutex); 2375274f052SJens Axboe 2385274f052SJens Axboe if (do_wakeup) { 239c0bd1f65SJens Axboe smp_mb(); 2403a326a2cSIngo Molnar if (waitqueue_active(&pipe->wait)) 2413a326a2cSIngo Molnar wake_up_interruptible(&pipe->wait); 2423a326a2cSIngo Molnar kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); 2435274f052SJens Axboe } 2445274f052SJens Axboe 245912d35f8SJens Axboe while (page_nr < spd->nr_pages) 246912d35f8SJens Axboe page_cache_release(spd->pages[page_nr++]); 2475274f052SJens Axboe 2485274f052SJens Axboe return ret; 2495274f052SJens Axboe } 2505274f052SJens Axboe 2513a326a2cSIngo Molnar static int 252cbb7e577SJens Axboe __generic_file_splice_read(struct file *in, loff_t *ppos, 253cbb7e577SJens Axboe struct pipe_inode_info *pipe, size_t len, 254cbb7e577SJens Axboe unsigned int flags) 2555274f052SJens Axboe { 2565274f052SJens Axboe struct address_space *mapping = in->f_mapping; 257912d35f8SJens Axboe unsigned int loff, nr_pages; 25816c523ddSJens Axboe struct page *pages[PIPE_BUFFERS]; 259912d35f8SJens 
Axboe struct partial_page partial[PIPE_BUFFERS]; 2605274f052SJens Axboe struct page *page; 26191ad66efSJens Axboe pgoff_t index, end_index; 26291ad66efSJens Axboe loff_t isize; 263912d35f8SJens Axboe size_t total_len; 264eb20796bSJens Axboe int error, page_nr; 265912d35f8SJens Axboe struct splice_pipe_desc spd = { 266912d35f8SJens Axboe .pages = pages, 267912d35f8SJens Axboe .partial = partial, 268912d35f8SJens Axboe .flags = flags, 269912d35f8SJens Axboe .ops = &page_cache_pipe_buf_ops, 270912d35f8SJens Axboe }; 2715274f052SJens Axboe 272cbb7e577SJens Axboe index = *ppos >> PAGE_CACHE_SHIFT; 273912d35f8SJens Axboe loff = *ppos & ~PAGE_CACHE_MASK; 274912d35f8SJens Axboe nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 2755274f052SJens Axboe 2765274f052SJens Axboe if (nr_pages > PIPE_BUFFERS) 2775274f052SJens Axboe nr_pages = PIPE_BUFFERS; 2785274f052SJens Axboe 2795274f052SJens Axboe /* 28073d62d83SIngo Molnar * Initiate read-ahead on this page range. however, don't call into 2810b749ce3SJens Axboe * read-ahead if this is a non-zero offset (we are likely doing small 2820b749ce3SJens Axboe * chunk splice and the page is already there) for a single page. 2835274f052SJens Axboe */ 284eb645a24SJens Axboe if (!loff || nr_pages > 1) 285eb645a24SJens Axboe page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages); 2865274f052SJens Axboe 2875274f052SJens Axboe /* 28873d62d83SIngo Molnar * Now fill in the holes: 2895274f052SJens Axboe */ 2907480a904SJens Axboe error = 0; 291912d35f8SJens Axboe total_len = 0; 29282aa5d61SJens Axboe 29382aa5d61SJens Axboe /* 294eb20796bSJens Axboe * Lookup the (hopefully) full range of pages we need. 29582aa5d61SJens Axboe */ 296eb20796bSJens Axboe spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages); 297eb20796bSJens Axboe 2985274f052SJens Axboe /* 299eb20796bSJens Axboe * If find_get_pages_contig() returned fewer pages than we needed, 300eb20796bSJens Axboe * allocate the rest. 
301eb20796bSJens Axboe */ 302eb20796bSJens Axboe index += spd.nr_pages; 303eb20796bSJens Axboe while (spd.nr_pages < nr_pages) { 304eb20796bSJens Axboe /* 305eb20796bSJens Axboe * Page could be there, find_get_pages_contig() breaks on 306eb20796bSJens Axboe * the first hole. 3075274f052SJens Axboe */ 3087480a904SJens Axboe page = find_get_page(mapping, index); 3097480a904SJens Axboe if (!page) { 3107480a904SJens Axboe /* 311e27dedd8SJens Axboe * Make sure the read-ahead engine is notified 312e27dedd8SJens Axboe * about this failure. 313e27dedd8SJens Axboe */ 314e27dedd8SJens Axboe handle_ra_miss(mapping, &in->f_ra, index); 315e27dedd8SJens Axboe 316e27dedd8SJens Axboe /* 317eb20796bSJens Axboe * page didn't exist, allocate one. 3187480a904SJens Axboe */ 3197480a904SJens Axboe page = page_cache_alloc_cold(mapping); 3205274f052SJens Axboe if (!page) 3215274f052SJens Axboe break; 3225274f052SJens Axboe 3237480a904SJens Axboe error = add_to_page_cache_lru(page, mapping, index, 3247480a904SJens Axboe mapping_gfp_mask(mapping)); 3255274f052SJens Axboe if (unlikely(error)) { 3265274f052SJens Axboe page_cache_release(page); 3275274f052SJens Axboe break; 3285274f052SJens Axboe } 329eb20796bSJens Axboe /* 330eb20796bSJens Axboe * add_to_page_cache() locks the page, unlock it 331eb20796bSJens Axboe * to avoid convoluting the logic below even more. 332eb20796bSJens Axboe */ 333eb20796bSJens Axboe unlock_page(page); 3345274f052SJens Axboe } 3357480a904SJens Axboe 336eb20796bSJens Axboe pages[spd.nr_pages++] = page; 337eb20796bSJens Axboe index++; 338eb20796bSJens Axboe } 339eb20796bSJens Axboe 340eb20796bSJens Axboe /* 341eb20796bSJens Axboe * Now loop over the map and see if we need to start IO on any 342eb20796bSJens Axboe * pages, fill in the partial map, etc. 
343eb20796bSJens Axboe */ 344eb20796bSJens Axboe index = *ppos >> PAGE_CACHE_SHIFT; 345eb20796bSJens Axboe nr_pages = spd.nr_pages; 346eb20796bSJens Axboe spd.nr_pages = 0; 347eb20796bSJens Axboe for (page_nr = 0; page_nr < nr_pages; page_nr++) { 348eb20796bSJens Axboe unsigned int this_len; 349eb20796bSJens Axboe 350eb20796bSJens Axboe if (!len) 351eb20796bSJens Axboe break; 352eb20796bSJens Axboe 353eb20796bSJens Axboe /* 354eb20796bSJens Axboe * this_len is the max we'll use from this page 355eb20796bSJens Axboe */ 356eb20796bSJens Axboe this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); 357eb20796bSJens Axboe page = pages[page_nr]; 358eb20796bSJens Axboe 3597480a904SJens Axboe /* 3607480a904SJens Axboe * If the page isn't uptodate, we may need to start io on it 3617480a904SJens Axboe */ 3627480a904SJens Axboe if (!PageUptodate(page)) { 363c4f895cbSJens Axboe /* 364c4f895cbSJens Axboe * If in nonblock mode then dont block on waiting 365c4f895cbSJens Axboe * for an in-flight io page 366c4f895cbSJens Axboe */ 367c4f895cbSJens Axboe if (flags & SPLICE_F_NONBLOCK) 368c4f895cbSJens Axboe break; 369c4f895cbSJens Axboe 3707480a904SJens Axboe lock_page(page); 3717480a904SJens Axboe 3727480a904SJens Axboe /* 3737480a904SJens Axboe * page was truncated, stop here. 
if this isn't the 3747480a904SJens Axboe * first page, we'll just complete what we already 3757480a904SJens Axboe * added 3767480a904SJens Axboe */ 3777480a904SJens Axboe if (!page->mapping) { 3787480a904SJens Axboe unlock_page(page); 3797480a904SJens Axboe break; 3807480a904SJens Axboe } 3817480a904SJens Axboe /* 3827480a904SJens Axboe * page was already under io and is now done, great 3837480a904SJens Axboe */ 3847480a904SJens Axboe if (PageUptodate(page)) { 3857480a904SJens Axboe unlock_page(page); 3867480a904SJens Axboe goto fill_it; 3877480a904SJens Axboe } 3887480a904SJens Axboe 3897480a904SJens Axboe /* 3907480a904SJens Axboe * need to read in the page 3917480a904SJens Axboe */ 3927480a904SJens Axboe error = mapping->a_ops->readpage(in, page); 3937480a904SJens Axboe if (unlikely(error)) { 394eb20796bSJens Axboe /* 395eb20796bSJens Axboe * We really should re-lookup the page here, 396eb20796bSJens Axboe * but it complicates things a lot. Instead 397eb20796bSJens Axboe * lets just do what we already stored, and 398eb20796bSJens Axboe * we'll get it the next time we are called. 399eb20796bSJens Axboe */ 4007480a904SJens Axboe if (error == AOP_TRUNCATED_PAGE) 401eb20796bSJens Axboe error = 0; 402eb20796bSJens Axboe 4037480a904SJens Axboe break; 4047480a904SJens Axboe } 40591ad66efSJens Axboe 40691ad66efSJens Axboe /* 40791ad66efSJens Axboe * i_size must be checked after ->readpage(). 
40891ad66efSJens Axboe */ 40991ad66efSJens Axboe isize = i_size_read(mapping->host); 41091ad66efSJens Axboe end_index = (isize - 1) >> PAGE_CACHE_SHIFT; 411eb20796bSJens Axboe if (unlikely(!isize || index > end_index)) 41291ad66efSJens Axboe break; 41391ad66efSJens Axboe 41491ad66efSJens Axboe /* 41591ad66efSJens Axboe * if this is the last page, see if we need to shrink 41691ad66efSJens Axboe * the length and stop 41791ad66efSJens Axboe */ 41891ad66efSJens Axboe if (end_index == index) { 41991ad66efSJens Axboe loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK); 420eb20796bSJens Axboe if (total_len + loff > isize) 42191ad66efSJens Axboe break; 42291ad66efSJens Axboe /* 42391ad66efSJens Axboe * force quit after adding this page 42491ad66efSJens Axboe */ 425eb20796bSJens Axboe len = this_len; 42682aa5d61SJens Axboe this_len = min(this_len, loff); 427912d35f8SJens Axboe loff = 0; 42891ad66efSJens Axboe } 4297480a904SJens Axboe } 4307480a904SJens Axboe fill_it: 431eb20796bSJens Axboe partial[page_nr].offset = loff; 432eb20796bSJens Axboe partial[page_nr].len = this_len; 43382aa5d61SJens Axboe len -= this_len; 434912d35f8SJens Axboe total_len += this_len; 43591ad66efSJens Axboe loff = 0; 436eb20796bSJens Axboe spd.nr_pages++; 437eb20796bSJens Axboe index++; 4385274f052SJens Axboe } 4395274f052SJens Axboe 440eb20796bSJens Axboe /* 441eb20796bSJens Axboe * Release any pages at the end, if we quit early. 'i' is how far 442eb20796bSJens Axboe * we got, 'nr_pages' is how many pages are in the map. 
443eb20796bSJens Axboe */ 444eb20796bSJens Axboe while (page_nr < nr_pages) 445eb20796bSJens Axboe page_cache_release(pages[page_nr++]); 446eb20796bSJens Axboe 447912d35f8SJens Axboe if (spd.nr_pages) 44800522fb4SJens Axboe return splice_to_pipe(pipe, &spd); 44916c523ddSJens Axboe 4507480a904SJens Axboe return error; 4515274f052SJens Axboe } 4525274f052SJens Axboe 45383f9135bSJens Axboe /** 45483f9135bSJens Axboe * generic_file_splice_read - splice data from file to a pipe 45583f9135bSJens Axboe * @in: file to splice from 45683f9135bSJens Axboe * @pipe: pipe to splice to 45783f9135bSJens Axboe * @len: number of bytes to splice 45883f9135bSJens Axboe * @flags: splice modifier flags 45983f9135bSJens Axboe * 46083f9135bSJens Axboe * Will read pages from given file and fill them into a pipe. 46183f9135bSJens Axboe */ 462cbb7e577SJens Axboe ssize_t generic_file_splice_read(struct file *in, loff_t *ppos, 463cbb7e577SJens Axboe struct pipe_inode_info *pipe, size_t len, 464cbb7e577SJens Axboe unsigned int flags) 4655274f052SJens Axboe { 4665274f052SJens Axboe ssize_t spliced; 4675274f052SJens Axboe int ret; 4685274f052SJens Axboe 4695274f052SJens Axboe ret = 0; 4705274f052SJens Axboe spliced = 0; 4713a326a2cSIngo Molnar 4725274f052SJens Axboe while (len) { 473cbb7e577SJens Axboe ret = __generic_file_splice_read(in, ppos, pipe, len, flags); 4745274f052SJens Axboe 475c4f895cbSJens Axboe if (ret < 0) 4765274f052SJens Axboe break; 477c4f895cbSJens Axboe else if (!ret) { 478c4f895cbSJens Axboe if (spliced) 479c4f895cbSJens Axboe break; 480c4f895cbSJens Axboe if (flags & SPLICE_F_NONBLOCK) { 481c4f895cbSJens Axboe ret = -EAGAIN; 482c4f895cbSJens Axboe break; 483c4f895cbSJens Axboe } 484c4f895cbSJens Axboe } 4855274f052SJens Axboe 486cbb7e577SJens Axboe *ppos += ret; 4875274f052SJens Axboe len -= ret; 4885274f052SJens Axboe spliced += ret; 4895274f052SJens Axboe } 4905274f052SJens Axboe 4915274f052SJens Axboe if (spliced) 4925274f052SJens Axboe return spliced; 4935274f052SJens 
Axboe 4945274f052SJens Axboe return ret; 4955274f052SJens Axboe } 4965274f052SJens Axboe 497059a8f37SJens Axboe EXPORT_SYMBOL(generic_file_splice_read); 498059a8f37SJens Axboe 4995274f052SJens Axboe /* 5004f6f0bd2SJens Axboe * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos' 501016b661eSJens Axboe * using sendpage(). Return the number of bytes sent. 5025274f052SJens Axboe */ 5035274f052SJens Axboe static int pipe_to_sendpage(struct pipe_inode_info *info, 5045274f052SJens Axboe struct pipe_buffer *buf, struct splice_desc *sd) 5055274f052SJens Axboe { 5065274f052SJens Axboe struct file *file = sd->file; 5075274f052SJens Axboe loff_t pos = sd->pos; 508f84d7519SJens Axboe int ret, more; 5095274f052SJens Axboe 510f84d7519SJens Axboe ret = buf->ops->pin(info, buf); 511f84d7519SJens Axboe if (!ret) { 512b2b39fa4SJens Axboe more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len; 5135274f052SJens Axboe 514f84d7519SJens Axboe ret = file->f_op->sendpage(file, buf->page, buf->offset, 515f84d7519SJens Axboe sd->len, &pos, more); 516f84d7519SJens Axboe } 5175274f052SJens Axboe 518016b661eSJens Axboe return ret; 5195274f052SJens Axboe } 5205274f052SJens Axboe 5215274f052SJens Axboe /* 5225274f052SJens Axboe * This is a little more tricky than the file -> pipe splicing. There are 5235274f052SJens Axboe * basically three cases: 5245274f052SJens Axboe * 5255274f052SJens Axboe * - Destination page already exists in the address space and there 5265274f052SJens Axboe * are users of it. For that case we have no other option that 5275274f052SJens Axboe * copying the data. Tough luck. 5285274f052SJens Axboe * - Destination page already exists in the address space, but there 5295274f052SJens Axboe * are no users of it. Make sure it's uptodate, then drop it. Fall 5305274f052SJens Axboe * through to last case. 5315274f052SJens Axboe * - Destination page does not exist, we can add the pipe page to 5325274f052SJens Axboe * the page cache and avoid the copy. 
5335274f052SJens Axboe * 53483f9135bSJens Axboe * If asked to move pages to the output file (SPLICE_F_MOVE is set in 53583f9135bSJens Axboe * sd->flags), we attempt to migrate pages from the pipe to the output 53683f9135bSJens Axboe * file address space page cache. This is possible if no one else has 53783f9135bSJens Axboe * the pipe page referenced outside of the pipe and page cache. If 53883f9135bSJens Axboe * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create 53983f9135bSJens Axboe * a new page in the output file page cache and fill/dirty that. 5405274f052SJens Axboe */ 5415274f052SJens Axboe static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf, 5425274f052SJens Axboe struct splice_desc *sd) 5435274f052SJens Axboe { 5445274f052SJens Axboe struct file *file = sd->file; 5455274f052SJens Axboe struct address_space *mapping = file->f_mapping; 5463e7ee3e7SJens Axboe gfp_t gfp_mask = mapping_gfp_mask(mapping); 547016b661eSJens Axboe unsigned int offset, this_len; 5485274f052SJens Axboe struct page *page; 5495274f052SJens Axboe pgoff_t index; 5503e7ee3e7SJens Axboe int ret; 5515274f052SJens Axboe 5525274f052SJens Axboe /* 55349d0b21bSJens Axboe * make sure the data in this buffer is uptodate 5545274f052SJens Axboe */ 555f84d7519SJens Axboe ret = buf->ops->pin(info, buf); 556f84d7519SJens Axboe if (unlikely(ret)) 557f84d7519SJens Axboe return ret; 5585274f052SJens Axboe 5595274f052SJens Axboe index = sd->pos >> PAGE_CACHE_SHIFT; 5605274f052SJens Axboe offset = sd->pos & ~PAGE_CACHE_MASK; 5615274f052SJens Axboe 562016b661eSJens Axboe this_len = sd->len; 563016b661eSJens Axboe if (this_len + offset > PAGE_CACHE_SIZE) 564016b661eSJens Axboe this_len = PAGE_CACHE_SIZE - offset; 565016b661eSJens Axboe 5665abc97aaSJens Axboe /* 5670568b409SJens Axboe * Reuse buf page, if SPLICE_F_MOVE is set and we are doing a full 5680568b409SJens Axboe * page. 
5695abc97aaSJens Axboe */ 5700568b409SJens Axboe if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) { 57183f9135bSJens Axboe /* 5721432873aSJens Axboe * If steal succeeds, buf->page is now pruned from the 5731432873aSJens Axboe * pagecache and we can reuse it. The page will also be 5741432873aSJens Axboe * locked on successful return. 57583f9135bSJens Axboe */ 5765abc97aaSJens Axboe if (buf->ops->steal(info, buf)) 5775abc97aaSJens Axboe goto find_page; 5785abc97aaSJens Axboe 5795abc97aaSJens Axboe page = buf->page; 58046e678c9SJens Axboe if (add_to_page_cache(page, mapping, index, gfp_mask)) { 58146e678c9SJens Axboe unlock_page(page); 5825abc97aaSJens Axboe goto find_page; 58346e678c9SJens Axboe } 5841432873aSJens Axboe 5851432873aSJens Axboe page_cache_get(page); 5861432873aSJens Axboe 5871432873aSJens Axboe if (!(buf->flags & PIPE_BUF_FLAG_LRU)) 5881432873aSJens Axboe lru_cache_add(page); 5895abc97aaSJens Axboe } else { 5905274f052SJens Axboe find_page: 5919e0267c2SJens Axboe page = find_lock_page(mapping, index); 5929e0267c2SJens Axboe if (!page) { 5935274f052SJens Axboe ret = -ENOMEM; 5949e0267c2SJens Axboe page = page_cache_alloc_cold(mapping); 5959e0267c2SJens Axboe if (unlikely(!page)) 5969aefe431SDave Jones goto out_nomem; 5975274f052SJens Axboe 5985274f052SJens Axboe /* 5999e0267c2SJens Axboe * This will also lock the page 6009e0267c2SJens Axboe */ 6019e0267c2SJens Axboe ret = add_to_page_cache_lru(page, mapping, index, 6029e0267c2SJens Axboe gfp_mask); 6039e0267c2SJens Axboe if (unlikely(ret)) 6049e0267c2SJens Axboe goto out; 6059e0267c2SJens Axboe } 6069e0267c2SJens Axboe 6079e0267c2SJens Axboe /* 6089e0267c2SJens Axboe * We get here with the page locked. If the page is also 6099e0267c2SJens Axboe * uptodate, we don't need to do more. If it isn't, we 6109e0267c2SJens Axboe * may need to bring it in if we are not going to overwrite 6119e0267c2SJens Axboe * the full page. 
6125274f052SJens Axboe */ 6135274f052SJens Axboe if (!PageUptodate(page)) { 614016b661eSJens Axboe if (this_len < PAGE_CACHE_SIZE) { 6155274f052SJens Axboe ret = mapping->a_ops->readpage(file, page); 6165274f052SJens Axboe if (unlikely(ret)) 6175274f052SJens Axboe goto out; 6185274f052SJens Axboe 6195274f052SJens Axboe lock_page(page); 6205274f052SJens Axboe 6215274f052SJens Axboe if (!PageUptodate(page)) { 6225274f052SJens Axboe /* 62373d62d83SIngo Molnar * Page got invalidated, repeat. 6245274f052SJens Axboe */ 6255274f052SJens Axboe if (!page->mapping) { 6265274f052SJens Axboe unlock_page(page); 6275274f052SJens Axboe page_cache_release(page); 6285274f052SJens Axboe goto find_page; 6295274f052SJens Axboe } 6305274f052SJens Axboe ret = -EIO; 6315274f052SJens Axboe goto out; 6325274f052SJens Axboe } 6339e0267c2SJens Axboe } else 6345274f052SJens Axboe SetPageUptodate(page); 6355274f052SJens Axboe } 6365274f052SJens Axboe } 6375274f052SJens Axboe 638016b661eSJens Axboe ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len); 639bfc4ee39SJens Axboe if (unlikely(ret)) { 640bfc4ee39SJens Axboe loff_t isize = i_size_read(mapping->host); 641bfc4ee39SJens Axboe 642bfc4ee39SJens Axboe if (ret != AOP_TRUNCATED_PAGE) 643bfc4ee39SJens Axboe unlock_page(page); 6444f6f0bd2SJens Axboe page_cache_release(page); 645bfc4ee39SJens Axboe if (ret == AOP_TRUNCATED_PAGE) 6464f6f0bd2SJens Axboe goto find_page; 647bfc4ee39SJens Axboe 648bfc4ee39SJens Axboe /* 649bfc4ee39SJens Axboe * prepare_write() may have instantiated a few blocks 650bfc4ee39SJens Axboe * outside i_size. Trim these off again. 651bfc4ee39SJens Axboe */ 652bfc4ee39SJens Axboe if (sd->pos + this_len > isize) 653bfc4ee39SJens Axboe vmtruncate(mapping->host, isize); 654bfc4ee39SJens Axboe 6555274f052SJens Axboe goto out; 656bfc4ee39SJens Axboe } 6575274f052SJens Axboe 6580568b409SJens Axboe if (buf->page != page) { 659f84d7519SJens Axboe /* 660f84d7519SJens Axboe * Careful, ->map() uses KM_USER0! 
661f84d7519SJens Axboe */ 662f6762b7aSJens Axboe char *src = buf->ops->map(info, buf, 1); 663f84d7519SJens Axboe char *dst = kmap_atomic(page, KM_USER1); 6645abc97aaSJens Axboe 665016b661eSJens Axboe memcpy(dst + offset, src + buf->offset, this_len); 6665274f052SJens Axboe flush_dcache_page(page); 667f84d7519SJens Axboe kunmap_atomic(dst, KM_USER1); 668f6762b7aSJens Axboe buf->ops->unmap(info, buf, src); 6695abc97aaSJens Axboe } 6705274f052SJens Axboe 671016b661eSJens Axboe ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len); 6720568b409SJens Axboe if (!ret) { 673016b661eSJens Axboe /* 6740568b409SJens Axboe * Return the number of bytes written and mark page as 6750568b409SJens Axboe * accessed, we are now done! 676016b661eSJens Axboe */ 677016b661eSJens Axboe ret = this_len; 678c7f21e4fSJens Axboe mark_page_accessed(page); 6794f6f0bd2SJens Axboe balance_dirty_pages_ratelimited(mapping); 6800568b409SJens Axboe } else if (ret == AOP_TRUNCATED_PAGE) { 6815274f052SJens Axboe page_cache_release(page); 6820568b409SJens Axboe goto find_page; 6830568b409SJens Axboe } 6840568b409SJens Axboe out: 6850568b409SJens Axboe page_cache_release(page); 6864f6f0bd2SJens Axboe unlock_page(page); 6879aefe431SDave Jones out_nomem: 6885274f052SJens Axboe return ret; 6895274f052SJens Axboe } 6905274f052SJens Axboe 69183f9135bSJens Axboe /* 69283f9135bSJens Axboe * Pipe input worker. Most of this logic works like a regular pipe, the 69383f9135bSJens Axboe * key here is the 'actor' worker passed in that actually moves the data 69483f9135bSJens Axboe * to the wanted destination. See pipe_to_file/pipe_to_sendpage above. 
69583f9135bSJens Axboe */ 69600522fb4SJens Axboe ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out, 697cbb7e577SJens Axboe loff_t *ppos, size_t len, unsigned int flags, 6985274f052SJens Axboe splice_actor *actor) 6995274f052SJens Axboe { 7005274f052SJens Axboe int ret, do_wakeup, err; 7015274f052SJens Axboe struct splice_desc sd; 7025274f052SJens Axboe 7035274f052SJens Axboe ret = 0; 7045274f052SJens Axboe do_wakeup = 0; 7055274f052SJens Axboe 7065274f052SJens Axboe sd.total_len = len; 7075274f052SJens Axboe sd.flags = flags; 7085274f052SJens Axboe sd.file = out; 709cbb7e577SJens Axboe sd.pos = *ppos; 7105274f052SJens Axboe 7113a326a2cSIngo Molnar if (pipe->inode) 7123a326a2cSIngo Molnar mutex_lock(&pipe->inode->i_mutex); 7135274f052SJens Axboe 7145274f052SJens Axboe for (;;) { 7156f767b04SJens Axboe if (pipe->nrbufs) { 7166f767b04SJens Axboe struct pipe_buffer *buf = pipe->bufs + pipe->curbuf; 7175274f052SJens Axboe struct pipe_buf_operations *ops = buf->ops; 7185274f052SJens Axboe 7195274f052SJens Axboe sd.len = buf->len; 7205274f052SJens Axboe if (sd.len > sd.total_len) 7215274f052SJens Axboe sd.len = sd.total_len; 7225274f052SJens Axboe 7233a326a2cSIngo Molnar err = actor(pipe, buf, &sd); 724016b661eSJens Axboe if (err <= 0) { 7255274f052SJens Axboe if (!ret && err != -ENODATA) 7265274f052SJens Axboe ret = err; 7275274f052SJens Axboe 7285274f052SJens Axboe break; 7295274f052SJens Axboe } 7305274f052SJens Axboe 731016b661eSJens Axboe ret += err; 732016b661eSJens Axboe buf->offset += err; 733016b661eSJens Axboe buf->len -= err; 734016b661eSJens Axboe 735016b661eSJens Axboe sd.len -= err; 736016b661eSJens Axboe sd.pos += err; 737016b661eSJens Axboe sd.total_len -= err; 738016b661eSJens Axboe if (sd.len) 739016b661eSJens Axboe continue; 74073d62d83SIngo Molnar 7415274f052SJens Axboe if (!buf->len) { 7425274f052SJens Axboe buf->ops = NULL; 7433a326a2cSIngo Molnar ops->release(pipe, buf); 7446f767b04SJens Axboe pipe->curbuf = (pipe->curbuf + 1) 
& (PIPE_BUFFERS - 1); 7456f767b04SJens Axboe pipe->nrbufs--; 7466f767b04SJens Axboe if (pipe->inode) 7475274f052SJens Axboe do_wakeup = 1; 7485274f052SJens Axboe } 7495274f052SJens Axboe 7505274f052SJens Axboe if (!sd.total_len) 7515274f052SJens Axboe break; 7525274f052SJens Axboe } 7535274f052SJens Axboe 7546f767b04SJens Axboe if (pipe->nrbufs) 7555274f052SJens Axboe continue; 7563a326a2cSIngo Molnar if (!pipe->writers) 7575274f052SJens Axboe break; 7583a326a2cSIngo Molnar if (!pipe->waiting_writers) { 7595274f052SJens Axboe if (ret) 7605274f052SJens Axboe break; 7615274f052SJens Axboe } 7625274f052SJens Axboe 76329e35094SLinus Torvalds if (flags & SPLICE_F_NONBLOCK) { 76429e35094SLinus Torvalds if (!ret) 76529e35094SLinus Torvalds ret = -EAGAIN; 76629e35094SLinus Torvalds break; 76729e35094SLinus Torvalds } 76829e35094SLinus Torvalds 7695274f052SJens Axboe if (signal_pending(current)) { 7705274f052SJens Axboe if (!ret) 7715274f052SJens Axboe ret = -ERESTARTSYS; 7725274f052SJens Axboe break; 7735274f052SJens Axboe } 7745274f052SJens Axboe 7755274f052SJens Axboe if (do_wakeup) { 776c0bd1f65SJens Axboe smp_mb(); 7773a326a2cSIngo Molnar if (waitqueue_active(&pipe->wait)) 7783a326a2cSIngo Molnar wake_up_interruptible_sync(&pipe->wait); 7793a326a2cSIngo Molnar kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); 7805274f052SJens Axboe do_wakeup = 0; 7815274f052SJens Axboe } 7825274f052SJens Axboe 7833a326a2cSIngo Molnar pipe_wait(pipe); 7845274f052SJens Axboe } 7855274f052SJens Axboe 7863a326a2cSIngo Molnar if (pipe->inode) 7873a326a2cSIngo Molnar mutex_unlock(&pipe->inode->i_mutex); 7885274f052SJens Axboe 7895274f052SJens Axboe if (do_wakeup) { 790c0bd1f65SJens Axboe smp_mb(); 7913a326a2cSIngo Molnar if (waitqueue_active(&pipe->wait)) 7923a326a2cSIngo Molnar wake_up_interruptible(&pipe->wait); 7933a326a2cSIngo Molnar kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); 7945274f052SJens Axboe } 7955274f052SJens Axboe 7965274f052SJens Axboe return ret; 7975274f052SJens 
Axboe } 7985274f052SJens Axboe 79983f9135bSJens Axboe /** 80083f9135bSJens Axboe * generic_file_splice_write - splice data from a pipe to a file 8013a326a2cSIngo Molnar * @pipe: pipe info 80283f9135bSJens Axboe * @out: file to write to 80383f9135bSJens Axboe * @len: number of bytes to splice 80483f9135bSJens Axboe * @flags: splice modifier flags 80583f9135bSJens Axboe * 80683f9135bSJens Axboe * Will either move or copy pages (determined by @flags options) from 80783f9135bSJens Axboe * the given pipe inode to the given file. 80883f9135bSJens Axboe * 80983f9135bSJens Axboe */ 8103a326a2cSIngo Molnar ssize_t 8113a326a2cSIngo Molnar generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, 812cbb7e577SJens Axboe loff_t *ppos, size_t len, unsigned int flags) 8135274f052SJens Axboe { 8144f6f0bd2SJens Axboe struct address_space *mapping = out->f_mapping; 8153a326a2cSIngo Molnar ssize_t ret; 8163a326a2cSIngo Molnar 81700522fb4SJens Axboe ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file); 818a4514ebdSJens Axboe if (ret > 0) { 819a4514ebdSJens Axboe struct inode *inode = mapping->host; 820a4514ebdSJens Axboe 821a4514ebdSJens Axboe *ppos += ret; 8224f6f0bd2SJens Axboe 8234f6f0bd2SJens Axboe /* 824a4514ebdSJens Axboe * If file or inode is SYNC and we actually wrote some data, 825a4514ebdSJens Axboe * sync it. 
8264f6f0bd2SJens Axboe */ 827a4514ebdSJens Axboe if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) { 8284f6f0bd2SJens Axboe int err; 8294f6f0bd2SJens Axboe 8304f6f0bd2SJens Axboe mutex_lock(&inode->i_mutex); 831a4514ebdSJens Axboe err = generic_osync_inode(inode, mapping, 8324f6f0bd2SJens Axboe OSYNC_METADATA|OSYNC_DATA); 8334f6f0bd2SJens Axboe mutex_unlock(&inode->i_mutex); 8344f6f0bd2SJens Axboe 8354f6f0bd2SJens Axboe if (err) 8364f6f0bd2SJens Axboe ret = err; 8374f6f0bd2SJens Axboe } 838a4514ebdSJens Axboe } 8394f6f0bd2SJens Axboe 8404f6f0bd2SJens Axboe return ret; 8415274f052SJens Axboe } 8425274f052SJens Axboe 843059a8f37SJens Axboe EXPORT_SYMBOL(generic_file_splice_write); 844059a8f37SJens Axboe 84583f9135bSJens Axboe /** 84683f9135bSJens Axboe * generic_splice_sendpage - splice data from a pipe to a socket 84783f9135bSJens Axboe * @inode: pipe inode 84883f9135bSJens Axboe * @out: socket to write to 84983f9135bSJens Axboe * @len: number of bytes to splice 85083f9135bSJens Axboe * @flags: splice modifier flags 85183f9135bSJens Axboe * 85283f9135bSJens Axboe * Will send @len bytes from the pipe to a network socket. No data copying 85383f9135bSJens Axboe * is involved. 85483f9135bSJens Axboe * 85583f9135bSJens Axboe */ 8563a326a2cSIngo Molnar ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out, 857cbb7e577SJens Axboe loff_t *ppos, size_t len, unsigned int flags) 8585274f052SJens Axboe { 85900522fb4SJens Axboe return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage); 8605274f052SJens Axboe } 8615274f052SJens Axboe 862059a8f37SJens Axboe EXPORT_SYMBOL(generic_splice_sendpage); 863a0f06780SJeff Garzik 86483f9135bSJens Axboe /* 86583f9135bSJens Axboe * Attempt to initiate a splice from pipe to file. 
86683f9135bSJens Axboe */ 8673a326a2cSIngo Molnar static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, 868cbb7e577SJens Axboe loff_t *ppos, size_t len, unsigned int flags) 8695274f052SJens Axboe { 8705274f052SJens Axboe int ret; 8715274f052SJens Axboe 87249570e9bSJens Axboe if (unlikely(!out->f_op || !out->f_op->splice_write)) 8735274f052SJens Axboe return -EINVAL; 8745274f052SJens Axboe 87549570e9bSJens Axboe if (unlikely(!(out->f_mode & FMODE_WRITE))) 8765274f052SJens Axboe return -EBADF; 8775274f052SJens Axboe 878cbb7e577SJens Axboe ret = rw_verify_area(WRITE, out, ppos, len); 8795274f052SJens Axboe if (unlikely(ret < 0)) 8805274f052SJens Axboe return ret; 8815274f052SJens Axboe 882cbb7e577SJens Axboe return out->f_op->splice_write(pipe, out, ppos, len, flags); 8835274f052SJens Axboe } 8845274f052SJens Axboe 88583f9135bSJens Axboe /* 88683f9135bSJens Axboe * Attempt to initiate a splice from a file to a pipe. 88783f9135bSJens Axboe */ 888cbb7e577SJens Axboe static long do_splice_to(struct file *in, loff_t *ppos, 889cbb7e577SJens Axboe struct pipe_inode_info *pipe, size_t len, 890cbb7e577SJens Axboe unsigned int flags) 8915274f052SJens Axboe { 892cbb7e577SJens Axboe loff_t isize, left; 8935274f052SJens Axboe int ret; 8945274f052SJens Axboe 89549570e9bSJens Axboe if (unlikely(!in->f_op || !in->f_op->splice_read)) 8965274f052SJens Axboe return -EINVAL; 8975274f052SJens Axboe 89849570e9bSJens Axboe if (unlikely(!(in->f_mode & FMODE_READ))) 8995274f052SJens Axboe return -EBADF; 9005274f052SJens Axboe 901cbb7e577SJens Axboe ret = rw_verify_area(READ, in, ppos, len); 9025274f052SJens Axboe if (unlikely(ret < 0)) 9035274f052SJens Axboe return ret; 9045274f052SJens Axboe 9055274f052SJens Axboe isize = i_size_read(in->f_mapping->host); 906cbb7e577SJens Axboe if (unlikely(*ppos >= isize)) 9075274f052SJens Axboe return 0; 9085274f052SJens Axboe 909cbb7e577SJens Axboe left = isize - *ppos; 91049570e9bSJens Axboe if (unlikely(left < len)) 
9115274f052SJens Axboe len = left; 9125274f052SJens Axboe 913cbb7e577SJens Axboe return in->f_op->splice_read(in, ppos, pipe, len, flags); 9145274f052SJens Axboe } 9155274f052SJens Axboe 916cbb7e577SJens Axboe long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, 917cbb7e577SJens Axboe size_t len, unsigned int flags) 918b92ce558SJens Axboe { 919b92ce558SJens Axboe struct pipe_inode_info *pipe; 920b92ce558SJens Axboe long ret, bytes; 921cbb7e577SJens Axboe loff_t out_off; 922b92ce558SJens Axboe umode_t i_mode; 923b92ce558SJens Axboe int i; 924b92ce558SJens Axboe 925b92ce558SJens Axboe /* 926b92ce558SJens Axboe * We require the input being a regular file, as we don't want to 927b92ce558SJens Axboe * randomly drop data for eg socket -> socket splicing. Use the 928b92ce558SJens Axboe * piped splicing for that! 929b92ce558SJens Axboe */ 930b92ce558SJens Axboe i_mode = in->f_dentry->d_inode->i_mode; 931b92ce558SJens Axboe if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode))) 932b92ce558SJens Axboe return -EINVAL; 933b92ce558SJens Axboe 934b92ce558SJens Axboe /* 935b92ce558SJens Axboe * neither in nor out is a pipe, setup an internal pipe attached to 936b92ce558SJens Axboe * 'out' and transfer the wanted data from 'in' to 'out' through that 937b92ce558SJens Axboe */ 938b92ce558SJens Axboe pipe = current->splice_pipe; 93949570e9bSJens Axboe if (unlikely(!pipe)) { 940b92ce558SJens Axboe pipe = alloc_pipe_info(NULL); 941b92ce558SJens Axboe if (!pipe) 942b92ce558SJens Axboe return -ENOMEM; 943b92ce558SJens Axboe 944b92ce558SJens Axboe /* 945b92ce558SJens Axboe * We don't have an immediate reader, but we'll read the stuff 94600522fb4SJens Axboe * out of the pipe right after the splice_to_pipe(). So set 947b92ce558SJens Axboe * PIPE_READERS appropriately. 
948b92ce558SJens Axboe */ 949b92ce558SJens Axboe pipe->readers = 1; 950b92ce558SJens Axboe 951b92ce558SJens Axboe current->splice_pipe = pipe; 952b92ce558SJens Axboe } 953b92ce558SJens Axboe 954b92ce558SJens Axboe /* 95573d62d83SIngo Molnar * Do the splice. 956b92ce558SJens Axboe */ 957b92ce558SJens Axboe ret = 0; 958b92ce558SJens Axboe bytes = 0; 959cbb7e577SJens Axboe out_off = 0; 960b92ce558SJens Axboe 961b92ce558SJens Axboe while (len) { 962b92ce558SJens Axboe size_t read_len, max_read_len; 963b92ce558SJens Axboe 964b92ce558SJens Axboe /* 965b92ce558SJens Axboe * Do at most PIPE_BUFFERS pages worth of transfer: 966b92ce558SJens Axboe */ 967b92ce558SJens Axboe max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE)); 968b92ce558SJens Axboe 969cbb7e577SJens Axboe ret = do_splice_to(in, ppos, pipe, max_read_len, flags); 970b92ce558SJens Axboe if (unlikely(ret < 0)) 971b92ce558SJens Axboe goto out_release; 972b92ce558SJens Axboe 973b92ce558SJens Axboe read_len = ret; 974b92ce558SJens Axboe 975b92ce558SJens Axboe /* 976b92ce558SJens Axboe * NOTE: nonblocking mode only applies to the input. We 977b92ce558SJens Axboe * must not do the output in nonblocking mode as then we 978b92ce558SJens Axboe * could get stuck data in the internal pipe: 979b92ce558SJens Axboe */ 980cbb7e577SJens Axboe ret = do_splice_from(pipe, out, &out_off, read_len, 981b92ce558SJens Axboe flags & ~SPLICE_F_NONBLOCK); 982b92ce558SJens Axboe if (unlikely(ret < 0)) 983b92ce558SJens Axboe goto out_release; 984b92ce558SJens Axboe 985b92ce558SJens Axboe bytes += ret; 986b92ce558SJens Axboe len -= ret; 987b92ce558SJens Axboe 988b92ce558SJens Axboe /* 989b92ce558SJens Axboe * In nonblocking mode, if we got back a short read then 990b92ce558SJens Axboe * that was due to either an IO error or due to the 991b92ce558SJens Axboe * pagecache entry not being there. 
In the IO error case 992b92ce558SJens Axboe * the _next_ splice attempt will produce a clean IO error 993b92ce558SJens Axboe * return value (not a short read), so in both cases it's 994b92ce558SJens Axboe * correct to break out of the loop here: 995b92ce558SJens Axboe */ 996b92ce558SJens Axboe if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len)) 997b92ce558SJens Axboe break; 998b92ce558SJens Axboe } 999b92ce558SJens Axboe 1000b92ce558SJens Axboe pipe->nrbufs = pipe->curbuf = 0; 1001b92ce558SJens Axboe 1002b92ce558SJens Axboe return bytes; 1003b92ce558SJens Axboe 1004b92ce558SJens Axboe out_release: 1005b92ce558SJens Axboe /* 1006b92ce558SJens Axboe * If we did an incomplete transfer we must release 1007b92ce558SJens Axboe * the pipe buffers in question: 1008b92ce558SJens Axboe */ 1009b92ce558SJens Axboe for (i = 0; i < PIPE_BUFFERS; i++) { 1010b92ce558SJens Axboe struct pipe_buffer *buf = pipe->bufs + i; 1011b92ce558SJens Axboe 1012b92ce558SJens Axboe if (buf->ops) { 1013b92ce558SJens Axboe buf->ops->release(pipe, buf); 1014b92ce558SJens Axboe buf->ops = NULL; 1015b92ce558SJens Axboe } 1016b92ce558SJens Axboe } 1017b92ce558SJens Axboe pipe->nrbufs = pipe->curbuf = 0; 1018b92ce558SJens Axboe 1019b92ce558SJens Axboe /* 1020b92ce558SJens Axboe * If we transferred some data, return the number of bytes: 1021b92ce558SJens Axboe */ 1022b92ce558SJens Axboe if (bytes > 0) 1023b92ce558SJens Axboe return bytes; 1024b92ce558SJens Axboe 1025b92ce558SJens Axboe return ret; 1026b92ce558SJens Axboe } 1027b92ce558SJens Axboe 1028b92ce558SJens Axboe EXPORT_SYMBOL(do_splice_direct); 1029b92ce558SJens Axboe 103083f9135bSJens Axboe /* 103183f9135bSJens Axboe * Determine where to splice to/from. 
103283f9135bSJens Axboe */ 1033529565dcSIngo Molnar static long do_splice(struct file *in, loff_t __user *off_in, 1034529565dcSIngo Molnar struct file *out, loff_t __user *off_out, 1035529565dcSIngo Molnar size_t len, unsigned int flags) 10365274f052SJens Axboe { 10373a326a2cSIngo Molnar struct pipe_inode_info *pipe; 1038cbb7e577SJens Axboe loff_t offset, *off; 1039a4514ebdSJens Axboe long ret; 10405274f052SJens Axboe 10413a326a2cSIngo Molnar pipe = in->f_dentry->d_inode->i_pipe; 1042529565dcSIngo Molnar if (pipe) { 1043529565dcSIngo Molnar if (off_in) 1044529565dcSIngo Molnar return -ESPIPE; 1045b92ce558SJens Axboe if (off_out) { 1046b92ce558SJens Axboe if (out->f_op->llseek == no_llseek) 1047b92ce558SJens Axboe return -EINVAL; 1048cbb7e577SJens Axboe if (copy_from_user(&offset, off_out, sizeof(loff_t))) 1049b92ce558SJens Axboe return -EFAULT; 1050cbb7e577SJens Axboe off = &offset; 1051cbb7e577SJens Axboe } else 1052cbb7e577SJens Axboe off = &out->f_pos; 1053529565dcSIngo Molnar 1054a4514ebdSJens Axboe ret = do_splice_from(pipe, out, off, len, flags); 1055a4514ebdSJens Axboe 1056a4514ebdSJens Axboe if (off_out && copy_to_user(off_out, off, sizeof(loff_t))) 1057a4514ebdSJens Axboe ret = -EFAULT; 1058a4514ebdSJens Axboe 1059a4514ebdSJens Axboe return ret; 1060529565dcSIngo Molnar } 10615274f052SJens Axboe 10623a326a2cSIngo Molnar pipe = out->f_dentry->d_inode->i_pipe; 1063529565dcSIngo Molnar if (pipe) { 1064529565dcSIngo Molnar if (off_out) 1065529565dcSIngo Molnar return -ESPIPE; 1066b92ce558SJens Axboe if (off_in) { 1067b92ce558SJens Axboe if (in->f_op->llseek == no_llseek) 1068b92ce558SJens Axboe return -EINVAL; 1069cbb7e577SJens Axboe if (copy_from_user(&offset, off_in, sizeof(loff_t))) 1070b92ce558SJens Axboe return -EFAULT; 1071cbb7e577SJens Axboe off = &offset; 1072cbb7e577SJens Axboe } else 1073cbb7e577SJens Axboe off = &in->f_pos; 1074529565dcSIngo Molnar 1075a4514ebdSJens Axboe ret = do_splice_to(in, off, pipe, len, flags); 1076a4514ebdSJens Axboe 
1077a4514ebdSJens Axboe if (off_in && copy_to_user(off_in, off, sizeof(loff_t))) 1078a4514ebdSJens Axboe ret = -EFAULT; 1079a4514ebdSJens Axboe 1080a4514ebdSJens Axboe return ret; 1081529565dcSIngo Molnar } 10825274f052SJens Axboe 10835274f052SJens Axboe return -EINVAL; 10845274f052SJens Axboe } 10855274f052SJens Axboe 1086912d35f8SJens Axboe /* 1087912d35f8SJens Axboe * Map an iov into an array of pages and offset/length tupples. With the 1088912d35f8SJens Axboe * partial_page structure, we can map several non-contiguous ranges into 1089912d35f8SJens Axboe * our ones pages[] map instead of splitting that operation into pieces. 1090912d35f8SJens Axboe * Could easily be exported as a generic helper for other users, in which 1091912d35f8SJens Axboe * case one would probably want to add a 'max_nr_pages' parameter as well. 1092912d35f8SJens Axboe */ 1093912d35f8SJens Axboe static int get_iovec_page_array(const struct iovec __user *iov, 1094912d35f8SJens Axboe unsigned int nr_vecs, struct page **pages, 10957afa6fd0SJens Axboe struct partial_page *partial, int aligned) 1096912d35f8SJens Axboe { 1097912d35f8SJens Axboe int buffers = 0, error = 0; 1098912d35f8SJens Axboe 1099912d35f8SJens Axboe /* 1100912d35f8SJens Axboe * It's ok to take the mmap_sem for reading, even 1101912d35f8SJens Axboe * across a "get_user()". 1102912d35f8SJens Axboe */ 1103912d35f8SJens Axboe down_read(¤t->mm->mmap_sem); 1104912d35f8SJens Axboe 1105912d35f8SJens Axboe while (nr_vecs) { 1106912d35f8SJens Axboe unsigned long off, npages; 1107912d35f8SJens Axboe void __user *base; 1108912d35f8SJens Axboe size_t len; 1109912d35f8SJens Axboe int i; 1110912d35f8SJens Axboe 1111912d35f8SJens Axboe /* 1112912d35f8SJens Axboe * Get user address base and length for this iovec. 
1113912d35f8SJens Axboe */ 1114912d35f8SJens Axboe error = get_user(base, &iov->iov_base); 1115912d35f8SJens Axboe if (unlikely(error)) 1116912d35f8SJens Axboe break; 1117912d35f8SJens Axboe error = get_user(len, &iov->iov_len); 1118912d35f8SJens Axboe if (unlikely(error)) 1119912d35f8SJens Axboe break; 1120912d35f8SJens Axboe 1121912d35f8SJens Axboe /* 1122912d35f8SJens Axboe * Sanity check this iovec. 0 read succeeds. 1123912d35f8SJens Axboe */ 1124912d35f8SJens Axboe if (unlikely(!len)) 1125912d35f8SJens Axboe break; 1126912d35f8SJens Axboe error = -EFAULT; 1127912d35f8SJens Axboe if (unlikely(!base)) 1128912d35f8SJens Axboe break; 1129912d35f8SJens Axboe 1130912d35f8SJens Axboe /* 1131912d35f8SJens Axboe * Get this base offset and number of pages, then map 1132912d35f8SJens Axboe * in the user pages. 1133912d35f8SJens Axboe */ 1134912d35f8SJens Axboe off = (unsigned long) base & ~PAGE_MASK; 11357afa6fd0SJens Axboe 11367afa6fd0SJens Axboe /* 11377afa6fd0SJens Axboe * If asked for alignment, the offset must be zero and the 11387afa6fd0SJens Axboe * length a multiple of the PAGE_SIZE. 11397afa6fd0SJens Axboe */ 11407afa6fd0SJens Axboe error = -EINVAL; 11417afa6fd0SJens Axboe if (aligned && (off || len & ~PAGE_MASK)) 11427afa6fd0SJens Axboe break; 11437afa6fd0SJens Axboe 1144912d35f8SJens Axboe npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 1145912d35f8SJens Axboe if (npages > PIPE_BUFFERS - buffers) 1146912d35f8SJens Axboe npages = PIPE_BUFFERS - buffers; 1147912d35f8SJens Axboe 1148912d35f8SJens Axboe error = get_user_pages(current, current->mm, 1149912d35f8SJens Axboe (unsigned long) base, npages, 0, 0, 1150912d35f8SJens Axboe &pages[buffers], NULL); 1151912d35f8SJens Axboe 1152912d35f8SJens Axboe if (unlikely(error <= 0)) 1153912d35f8SJens Axboe break; 1154912d35f8SJens Axboe 1155912d35f8SJens Axboe /* 1156912d35f8SJens Axboe * Fill this contiguous range into the partial page map. 
1157912d35f8SJens Axboe */ 1158912d35f8SJens Axboe for (i = 0; i < error; i++) { 11597591489aSJens Axboe const int plen = min_t(size_t, len, PAGE_SIZE - off); 1160912d35f8SJens Axboe 1161912d35f8SJens Axboe partial[buffers].offset = off; 1162912d35f8SJens Axboe partial[buffers].len = plen; 1163912d35f8SJens Axboe 1164912d35f8SJens Axboe off = 0; 1165912d35f8SJens Axboe len -= plen; 1166912d35f8SJens Axboe buffers++; 1167912d35f8SJens Axboe } 1168912d35f8SJens Axboe 1169912d35f8SJens Axboe /* 1170912d35f8SJens Axboe * We didn't complete this iov, stop here since it probably 1171912d35f8SJens Axboe * means we have to move some of this into a pipe to 1172912d35f8SJens Axboe * be able to continue. 1173912d35f8SJens Axboe */ 1174912d35f8SJens Axboe if (len) 1175912d35f8SJens Axboe break; 1176912d35f8SJens Axboe 1177912d35f8SJens Axboe /* 1178912d35f8SJens Axboe * Don't continue if we mapped fewer pages than we asked for, 1179912d35f8SJens Axboe * or if we mapped the max number of pages that we have 1180912d35f8SJens Axboe * room for. 1181912d35f8SJens Axboe */ 1182912d35f8SJens Axboe if (error < npages || buffers == PIPE_BUFFERS) 1183912d35f8SJens Axboe break; 1184912d35f8SJens Axboe 1185912d35f8SJens Axboe nr_vecs--; 1186912d35f8SJens Axboe iov++; 1187912d35f8SJens Axboe } 1188912d35f8SJens Axboe 1189912d35f8SJens Axboe up_read(¤t->mm->mmap_sem); 1190912d35f8SJens Axboe 1191912d35f8SJens Axboe if (buffers) 1192912d35f8SJens Axboe return buffers; 1193912d35f8SJens Axboe 1194912d35f8SJens Axboe return error; 1195912d35f8SJens Axboe } 1196912d35f8SJens Axboe 1197912d35f8SJens Axboe /* 1198912d35f8SJens Axboe * vmsplice splices a user address range into a pipe. It can be thought of 1199912d35f8SJens Axboe * as splice-from-memory, where the regular splice is splice-from-file (or 1200912d35f8SJens Axboe * to file). In both cases the output is a pipe, naturally. 
1201912d35f8SJens Axboe * 1202912d35f8SJens Axboe * Note that vmsplice only supports splicing _from_ user memory to a pipe, 1203912d35f8SJens Axboe * not the other way around. Splicing from user memory is a simple operation 1204912d35f8SJens Axboe * that can be supported without any funky alignment restrictions or nasty 1205912d35f8SJens Axboe * vm tricks. We simply map in the user memory and fill them into a pipe. 1206912d35f8SJens Axboe * The reverse isn't quite as easy, though. There are two possible solutions 1207912d35f8SJens Axboe * for that: 1208912d35f8SJens Axboe * 1209912d35f8SJens Axboe * - memcpy() the data internally, at which point we might as well just 1210912d35f8SJens Axboe * do a regular read() on the buffer anyway. 1211912d35f8SJens Axboe * - Lots of nasty vm tricks, that are neither fast nor flexible (it 1212912d35f8SJens Axboe * has restriction limitations on both ends of the pipe). 1213912d35f8SJens Axboe * 1214912d35f8SJens Axboe * Alas, it isn't here. 1215912d35f8SJens Axboe * 1216912d35f8SJens Axboe */ 1217912d35f8SJens Axboe static long do_vmsplice(struct file *file, const struct iovec __user *iov, 1218912d35f8SJens Axboe unsigned long nr_segs, unsigned int flags) 1219912d35f8SJens Axboe { 1220912d35f8SJens Axboe struct pipe_inode_info *pipe = file->f_dentry->d_inode->i_pipe; 1221912d35f8SJens Axboe struct page *pages[PIPE_BUFFERS]; 1222912d35f8SJens Axboe struct partial_page partial[PIPE_BUFFERS]; 1223912d35f8SJens Axboe struct splice_pipe_desc spd = { 1224912d35f8SJens Axboe .pages = pages, 1225912d35f8SJens Axboe .partial = partial, 1226912d35f8SJens Axboe .flags = flags, 1227912d35f8SJens Axboe .ops = &user_page_pipe_buf_ops, 1228912d35f8SJens Axboe }; 1229912d35f8SJens Axboe 1230912d35f8SJens Axboe if (unlikely(!pipe)) 1231912d35f8SJens Axboe return -EBADF; 1232912d35f8SJens Axboe if (unlikely(nr_segs > UIO_MAXIOV)) 1233912d35f8SJens Axboe return -EINVAL; 1234912d35f8SJens Axboe else if (unlikely(!nr_segs)) 1235912d35f8SJens Axboe 
return 0; 1236912d35f8SJens Axboe 12377afa6fd0SJens Axboe spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial, 12387afa6fd0SJens Axboe flags & SPLICE_F_GIFT); 1239912d35f8SJens Axboe if (spd.nr_pages <= 0) 1240912d35f8SJens Axboe return spd.nr_pages; 1241912d35f8SJens Axboe 124200522fb4SJens Axboe return splice_to_pipe(pipe, &spd); 1243912d35f8SJens Axboe } 1244912d35f8SJens Axboe 1245912d35f8SJens Axboe asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov, 1246912d35f8SJens Axboe unsigned long nr_segs, unsigned int flags) 1247912d35f8SJens Axboe { 1248912d35f8SJens Axboe struct file *file; 1249912d35f8SJens Axboe long error; 1250912d35f8SJens Axboe int fput; 1251912d35f8SJens Axboe 1252912d35f8SJens Axboe error = -EBADF; 1253912d35f8SJens Axboe file = fget_light(fd, &fput); 1254912d35f8SJens Axboe if (file) { 1255912d35f8SJens Axboe if (file->f_mode & FMODE_WRITE) 1256912d35f8SJens Axboe error = do_vmsplice(file, iov, nr_segs, flags); 1257912d35f8SJens Axboe 1258912d35f8SJens Axboe fput_light(file, fput); 1259912d35f8SJens Axboe } 1260912d35f8SJens Axboe 1261912d35f8SJens Axboe return error; 1262912d35f8SJens Axboe } 1263912d35f8SJens Axboe 1264529565dcSIngo Molnar asmlinkage long sys_splice(int fd_in, loff_t __user *off_in, 1265529565dcSIngo Molnar int fd_out, loff_t __user *off_out, 1266529565dcSIngo Molnar size_t len, unsigned int flags) 12675274f052SJens Axboe { 12685274f052SJens Axboe long error; 12695274f052SJens Axboe struct file *in, *out; 12705274f052SJens Axboe int fput_in, fput_out; 12715274f052SJens Axboe 12725274f052SJens Axboe if (unlikely(!len)) 12735274f052SJens Axboe return 0; 12745274f052SJens Axboe 12755274f052SJens Axboe error = -EBADF; 1276529565dcSIngo Molnar in = fget_light(fd_in, &fput_in); 12775274f052SJens Axboe if (in) { 12785274f052SJens Axboe if (in->f_mode & FMODE_READ) { 1279529565dcSIngo Molnar out = fget_light(fd_out, &fput_out); 12805274f052SJens Axboe if (out) { 12815274f052SJens Axboe if (out->f_mode 
& FMODE_WRITE) 1282529565dcSIngo Molnar error = do_splice(in, off_in, 1283529565dcSIngo Molnar out, off_out, 1284529565dcSIngo Molnar len, flags); 12855274f052SJens Axboe fput_light(out, fput_out); 12865274f052SJens Axboe } 12875274f052SJens Axboe } 12885274f052SJens Axboe 12895274f052SJens Axboe fput_light(in, fput_in); 12905274f052SJens Axboe } 12915274f052SJens Axboe 12925274f052SJens Axboe return error; 12935274f052SJens Axboe } 129470524490SJens Axboe 129570524490SJens Axboe /* 129670524490SJens Axboe * Link contents of ipipe to opipe. 129770524490SJens Axboe */ 129870524490SJens Axboe static int link_pipe(struct pipe_inode_info *ipipe, 129970524490SJens Axboe struct pipe_inode_info *opipe, 130070524490SJens Axboe size_t len, unsigned int flags) 130170524490SJens Axboe { 130270524490SJens Axboe struct pipe_buffer *ibuf, *obuf; 13032a27250eSJens Axboe int ret, do_wakeup, i, ipipe_first; 13042a27250eSJens Axboe 13052a27250eSJens Axboe ret = do_wakeup = ipipe_first = 0; 130670524490SJens Axboe 130770524490SJens Axboe /* 130870524490SJens Axboe * Potential ABBA deadlock, work around it by ordering lock 130970524490SJens Axboe * grabbing by inode address. Otherwise two different processes 131070524490SJens Axboe * could deadlock (one doing tee from A -> B, the other from B -> A). 
131170524490SJens Axboe */ 131270524490SJens Axboe if (ipipe->inode < opipe->inode) { 13132a27250eSJens Axboe ipipe_first = 1; 131470524490SJens Axboe mutex_lock(&ipipe->inode->i_mutex); 131570524490SJens Axboe mutex_lock(&opipe->inode->i_mutex); 131670524490SJens Axboe } else { 131770524490SJens Axboe mutex_lock(&opipe->inode->i_mutex); 131870524490SJens Axboe mutex_lock(&ipipe->inode->i_mutex); 131970524490SJens Axboe } 132070524490SJens Axboe 132170524490SJens Axboe for (i = 0;; i++) { 132270524490SJens Axboe if (!opipe->readers) { 132370524490SJens Axboe send_sig(SIGPIPE, current, 0); 132470524490SJens Axboe if (!ret) 132570524490SJens Axboe ret = -EPIPE; 132670524490SJens Axboe break; 132770524490SJens Axboe } 132870524490SJens Axboe if (ipipe->nrbufs - i) { 132970524490SJens Axboe ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1)); 133070524490SJens Axboe 133170524490SJens Axboe /* 133270524490SJens Axboe * If we have room, fill this buffer 133370524490SJens Axboe */ 133470524490SJens Axboe if (opipe->nrbufs < PIPE_BUFFERS) { 133570524490SJens Axboe int nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1); 133670524490SJens Axboe 133770524490SJens Axboe /* 133870524490SJens Axboe * Get a reference to this pipe buffer, 133970524490SJens Axboe * so we can copy the contents over. 134070524490SJens Axboe */ 134170524490SJens Axboe ibuf->ops->get(ipipe, ibuf); 134270524490SJens Axboe 134370524490SJens Axboe obuf = opipe->bufs + nbuf; 134470524490SJens Axboe *obuf = *ibuf; 134570524490SJens Axboe 13467afa6fd0SJens Axboe /* 13477afa6fd0SJens Axboe * Don't inherit the gift flag, we need to 13487afa6fd0SJens Axboe * prevent multiple steals of this page. 
13497afa6fd0SJens Axboe */ 13507afa6fd0SJens Axboe obuf->flags &= ~PIPE_BUF_FLAG_GIFT; 13517afa6fd0SJens Axboe 135270524490SJens Axboe if (obuf->len > len) 135370524490SJens Axboe obuf->len = len; 135470524490SJens Axboe 135570524490SJens Axboe opipe->nrbufs++; 135670524490SJens Axboe do_wakeup = 1; 135770524490SJens Axboe ret += obuf->len; 135870524490SJens Axboe len -= obuf->len; 135970524490SJens Axboe 136070524490SJens Axboe if (!len) 136170524490SJens Axboe break; 136270524490SJens Axboe if (opipe->nrbufs < PIPE_BUFFERS) 136370524490SJens Axboe continue; 136470524490SJens Axboe } 136570524490SJens Axboe 136670524490SJens Axboe /* 136770524490SJens Axboe * We have input available, but no output room. 13682a27250eSJens Axboe * If we already copied data, return that. If we 13692a27250eSJens Axboe * need to drop the opipe lock, it must be ordered 13702a27250eSJens Axboe * last to avoid deadlocks. 137170524490SJens Axboe */ 13722a27250eSJens Axboe if ((flags & SPLICE_F_NONBLOCK) || !ipipe_first) { 137370524490SJens Axboe if (!ret) 137470524490SJens Axboe ret = -EAGAIN; 137570524490SJens Axboe break; 137670524490SJens Axboe } 137770524490SJens Axboe if (signal_pending(current)) { 137870524490SJens Axboe if (!ret) 137970524490SJens Axboe ret = -ERESTARTSYS; 138070524490SJens Axboe break; 138170524490SJens Axboe } 138270524490SJens Axboe if (do_wakeup) { 138370524490SJens Axboe smp_mb(); 138470524490SJens Axboe if (waitqueue_active(&opipe->wait)) 138570524490SJens Axboe wake_up_interruptible(&opipe->wait); 138670524490SJens Axboe kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN); 138770524490SJens Axboe do_wakeup = 0; 138870524490SJens Axboe } 138970524490SJens Axboe 139070524490SJens Axboe opipe->waiting_writers++; 139170524490SJens Axboe pipe_wait(opipe); 139270524490SJens Axboe opipe->waiting_writers--; 139370524490SJens Axboe continue; 139470524490SJens Axboe } 139570524490SJens Axboe 139670524490SJens Axboe /* 139770524490SJens Axboe * No input buffers, do the 
usual checks for available 139870524490SJens Axboe * writers and blocking and wait if necessary 139970524490SJens Axboe */ 140070524490SJens Axboe if (!ipipe->writers) 140170524490SJens Axboe break; 140270524490SJens Axboe if (!ipipe->waiting_writers) { 140370524490SJens Axboe if (ret) 140470524490SJens Axboe break; 140570524490SJens Axboe } 14062a27250eSJens Axboe /* 14072a27250eSJens Axboe * pipe_wait() drops the ipipe mutex. To avoid deadlocks 14082a27250eSJens Axboe * with another process, we can only safely do that if 14092a27250eSJens Axboe * the ipipe lock is ordered last. 14102a27250eSJens Axboe */ 14112a27250eSJens Axboe if ((flags & SPLICE_F_NONBLOCK) || ipipe_first) { 141270524490SJens Axboe if (!ret) 141370524490SJens Axboe ret = -EAGAIN; 141470524490SJens Axboe break; 141570524490SJens Axboe } 141670524490SJens Axboe if (signal_pending(current)) { 141770524490SJens Axboe if (!ret) 141870524490SJens Axboe ret = -ERESTARTSYS; 141970524490SJens Axboe break; 142070524490SJens Axboe } 142170524490SJens Axboe 142270524490SJens Axboe if (waitqueue_active(&ipipe->wait)) 142370524490SJens Axboe wake_up_interruptible_sync(&ipipe->wait); 142470524490SJens Axboe kill_fasync(&ipipe->fasync_writers, SIGIO, POLL_OUT); 142570524490SJens Axboe 142670524490SJens Axboe pipe_wait(ipipe); 142770524490SJens Axboe } 142870524490SJens Axboe 142970524490SJens Axboe mutex_unlock(&ipipe->inode->i_mutex); 143070524490SJens Axboe mutex_unlock(&opipe->inode->i_mutex); 143170524490SJens Axboe 143270524490SJens Axboe if (do_wakeup) { 143370524490SJens Axboe smp_mb(); 143470524490SJens Axboe if (waitqueue_active(&opipe->wait)) 143570524490SJens Axboe wake_up_interruptible(&opipe->wait); 143670524490SJens Axboe kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN); 143770524490SJens Axboe } 143870524490SJens Axboe 143970524490SJens Axboe return ret; 144070524490SJens Axboe } 144170524490SJens Axboe 144270524490SJens Axboe /* 144370524490SJens Axboe * This is a tee(1) implementation that 
works on pipes. It doesn't copy 144470524490SJens Axboe * any data, it simply references the 'in' pages on the 'out' pipe. 144570524490SJens Axboe * The 'flags' used are the SPLICE_F_* variants, currently the only 144670524490SJens Axboe * applicable one is SPLICE_F_NONBLOCK. 144770524490SJens Axboe */ 144870524490SJens Axboe static long do_tee(struct file *in, struct file *out, size_t len, 144970524490SJens Axboe unsigned int flags) 145070524490SJens Axboe { 145170524490SJens Axboe struct pipe_inode_info *ipipe = in->f_dentry->d_inode->i_pipe; 145270524490SJens Axboe struct pipe_inode_info *opipe = out->f_dentry->d_inode->i_pipe; 145370524490SJens Axboe 145470524490SJens Axboe /* 145570524490SJens Axboe * Link ipipe to the two output pipes, consuming as we go along. 145670524490SJens Axboe */ 145770524490SJens Axboe if (ipipe && opipe) 145870524490SJens Axboe return link_pipe(ipipe, opipe, len, flags); 145970524490SJens Axboe 146070524490SJens Axboe return -EINVAL; 146170524490SJens Axboe } 146270524490SJens Axboe 146370524490SJens Axboe asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags) 146470524490SJens Axboe { 146570524490SJens Axboe struct file *in; 146670524490SJens Axboe int error, fput_in; 146770524490SJens Axboe 146870524490SJens Axboe if (unlikely(!len)) 146970524490SJens Axboe return 0; 147070524490SJens Axboe 147170524490SJens Axboe error = -EBADF; 147270524490SJens Axboe in = fget_light(fdin, &fput_in); 147370524490SJens Axboe if (in) { 147470524490SJens Axboe if (in->f_mode & FMODE_READ) { 147570524490SJens Axboe int fput_out; 147670524490SJens Axboe struct file *out = fget_light(fdout, &fput_out); 147770524490SJens Axboe 147870524490SJens Axboe if (out) { 147970524490SJens Axboe if (out->f_mode & FMODE_WRITE) 148070524490SJens Axboe error = do_tee(in, out, len, flags); 148170524490SJens Axboe fput_light(out, fput_out); 148270524490SJens Axboe } 148370524490SJens Axboe } 148470524490SJens Axboe fput_light(in, fput_in); 
148570524490SJens Axboe } 148670524490SJens Axboe 148770524490SJens Axboe return error; 148870524490SJens Axboe } 1489