// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

#define IOEND_BATCH_SIZE	4096

/*
 * Structure allocated for each folio when block size < folio size
 * to track sub-folio uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];
};

static inline struct iomap_page *to_iomap_page(struct folio *folio)
{
	if (folio_test_private(folio))
		return folio_get_private(folio);
	return NULL;
}

static struct bio_set iomap_ioend_bioset;

static struct iomap_page *
iomap_page_create(struct inode *inode, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (iop || nr_blocks <= 1)
		return iop;

	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
			GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&iop->uptodate_lock);
	if (folio_test_uptodate(folio))
		bitmap_fill(iop->uptodate, nr_blocks);
	folio_attach_private(folio, iop);
	return iop;
}
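
/*
 * Release the sub-folio state attached to @folio.  By the time a folio is
 * released, all reads and writes against it must have completed, and its
 * uptodate bitmap must agree with the folio-level uptodate flag.
 */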
static void iomap_page_release(struct folio *folio)
{
	struct iomap_page *iop = folio_detach_private(folio);
	struct inode *inode = folio->mapping->host;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			folio_test_uptodate(folio));
	kfree(iop);
}

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_page *iop = to_iomap_page(folio);
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

static void iomap_iop_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned long flags;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, last - first + 1);
	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void iomap_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	if (folio_test_error(folio))
		return;

	if (iop)
		iomap_iop_set_range_uptodate(folio, iop, off, len);
	else
		folio_mark_uptodate(folio);
}

static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (unlikely(error)) {
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		iomap_set_range_uptodate(folio, iop, offset, len);
	}

	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
		folio_unlock(folio);
}
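
/*
 * Read completion handler: mark each folio range in the bio uptodate (or
 * record the error), and unlock any folio whose outstanding reads have all
 * completed.
 */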
static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	struct iomap_page *iop;
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	size_t offset = offset_in_folio(folio, iomap->offset);
	void *addr;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			 offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		iop = iomap_page_create(iter->inode, folio);
	else
		iop = to_iomap_page(folio);

	addr = kmap_local_folio(folio, offset);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
	return 0;
}
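
/*
 * A block needs zeroing rather than reading when the source mapping has no
 * valid data for it: the extent is not IOMAP_MAPPED, the block was just
 * allocated (IOMAP_F_NEW), or it lies entirely beyond i_size.
 */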
static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct folio *folio = ctx->cur_folio;
	struct iomap_page *iop;
	loff_t orig_pos = pos;
	size_t poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, folio);

	/* zero post-eof blocks as the page may be mapped */
	iop = iomap_page_create(iter->inode, folio);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, iop, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (iop)
		atomic_add(plen, &iop->read_bytes_pending);

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
				     REQ_OP_READ, gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_readpage does.
		 */
		if (!ctx->bio) {
			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
					     orig_gfp);
		}
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		folio_set_error(folio);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_page, we always
	 * return 0 and just set the folio error flag on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_read_folio);
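
/*
 * Iterate one mapping's worth of readahead.  Whenever the position crosses
 * into a new folio, take the next folio from the readahead control; folios
 * that end up with no I/O in flight are unlocked here, the rest are
 * unlocked on bio completion.
 */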
static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);

/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_page *iop = to_iomap_page(folio);
	struct inode *inode = folio->mapping->host;
	unsigned first, last, i;

	if (!iop)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	for (i = first; i <= last; i++)
		if (!test_bit(i, iop->uptodate))
			return false;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct folio *folio = page_folio(page);

	trace_iomap_releasepage(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return 0;
	iomap_page_release(folio);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
					folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		iomap_page_release(folio);
	} else if (folio_test_large(folio)) {
		/* Must release the iop so the page can be split */
		WARN_ON_ONCE(!folio_test_uptodate(folio) &&
			     folio_test_dirty(folio));
		iomap_page_release(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	struct folio *folio = page_folio(page);
	struct folio *newfolio = page_folio(newpage);
	int ret;

	ret = folio_migrate_mapping(mapping, newfolio, folio, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_test_private(folio))
		folio_attach_private(newfolio, folio_detach_private(folio));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(newfolio, folio);
	else
		folio_migrate_flags(newfolio, folio);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_add_folio(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}
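
/*
 * Bring the blocks covered by a write uptodate before the copy-in.  Blocks
 * that the write fully overwrites are skipped; the rest are either zeroed
 * (when the mapping says there is nothing on disk to read) or read in
 * synchronously, so that a write of a partial block cannot expose stale
 * data around it.
 */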
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_page *iop = iomap_page_create(iter->inode, folio);
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	if (folio_test_uptodate(folio))
		return 0;
	folio_clear_error(folio);

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, iop, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}
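
/*
 * Find or create the folio backing @pos, lock it, and prepare it for the
 * write.  On success the locked folio is returned in @foliop; on failure
 * the folio is released and ->page_done is called to undo ->page_prepare.
 */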
static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(iter->inode, pos, len);
		if (status)
			return status;
	}

	folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
	if (!folio) {
		status = -ENOMEM;
		goto out_no_page;
	}
	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	folio_unlock(folio);
	folio_put(folio);
	iomap_write_failed(iter->inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, 0, NULL);
	return status;
}

static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return 0;
	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
	filemap_dirty_folio(inode->i_mapping, folio);
	return copied;
}

static size_t iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, folio, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, &folio->page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	folio_unlock(folio);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, ret, &folio->page);
	folio_put(folio);

	if (ret < len)
		iomap_write_failed(iter->inode, pos, len);
	return ret;
}

static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;

	do {
		struct folio *folio;
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			break;

		page = folio_file_page(folio, pos >> PAGE_SHIFT);
		if (mapping_writably_mapped(iter->inode->i_mapping))
			flush_dcache_page(page);

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
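
		/*
		 * The copy runs with page faults disabled, so it may be
		 * short; iomap_write_end() decides how much of the copied
		 * data to keep.
		 */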
		status = iomap_write_end(iter, pos, bytes, copied, folio);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			goto again;
		}
		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);
	if (iter.pos == iocb->ki_pos)
		return ret;
	return iter.pos - iocb->ki_pos;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	long status = 0;
	loff_t written = 0;

	/* don't bother with blocks that are not shared to start with */
	if (!(iomap->flags & IOMAP_F_SHARED))
		return length;
	/* don't bother with holes or unwritten extents */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;
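
	/*
	 * Rewrite each page in place: iomap_write_begin() reads in the
	 * existing data and iomap_write_end() dirties it, which is all it
	 * takes to break the sharing.
	 */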
	do {
		unsigned long offset = offset_in_page(pos);
		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
		struct folio *folio;

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			return status;

		status = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(status == 0))
			return -EIO;

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);

static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		struct folio *folio;
		int status;
		size_t offset;
		size_t bytes = min_t(u64, SIZE_MAX, length);
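
		/*
		 * iomap_write_begin() may hand back a folio that covers less
		 * than @bytes, so clamp the zeroed range to the folio.
		 */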
Wong 9034d7bd0ebSMatthew Wilcox (Oracle) status = iomap_write_begin(iter, pos, bytes, &folio); 9044d7bd0ebSMatthew Wilcox (Oracle) if (status) 9054d7bd0ebSMatthew Wilcox (Oracle) return status; 9064d7bd0ebSMatthew Wilcox (Oracle) 9074d7bd0ebSMatthew Wilcox (Oracle) offset = offset_in_folio(folio, pos); 9084d7bd0ebSMatthew Wilcox (Oracle) if (bytes > folio_size(folio) - offset) 9094d7bd0ebSMatthew Wilcox (Oracle) bytes = folio_size(folio) - offset; 9104d7bd0ebSMatthew Wilcox (Oracle) 9114d7bd0ebSMatthew Wilcox (Oracle) folio_zero_range(folio, offset, bytes); 9124d7bd0ebSMatthew Wilcox (Oracle) folio_mark_accessed(folio); 9134d7bd0ebSMatthew Wilcox (Oracle) 9144d7bd0ebSMatthew Wilcox (Oracle) bytes = iomap_write_end(iter, pos, bytes, bytes, folio); 9154d7bd0ebSMatthew Wilcox (Oracle) if (WARN_ON_ONCE(bytes == 0)) 9164d7bd0ebSMatthew Wilcox (Oracle) return -EIO; 917afc51aaaSDarrick J. Wong 918afc51aaaSDarrick J. Wong pos += bytes; 91981ee8e52SMatthew Wilcox (Oracle) length -= bytes; 920afc51aaaSDarrick J. Wong written += bytes; 921afc51aaaSDarrick J. Wong if (did_zero) 922afc51aaaSDarrick J. Wong *did_zero = true; 92381ee8e52SMatthew Wilcox (Oracle) } while (length > 0); 924afc51aaaSDarrick J. Wong 925afc51aaaSDarrick J. Wong return written; 926afc51aaaSDarrick J. Wong } 927afc51aaaSDarrick J. Wong 928afc51aaaSDarrick J. Wong int 929afc51aaaSDarrick J. Wong iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, 930afc51aaaSDarrick J. Wong const struct iomap_ops *ops) 931afc51aaaSDarrick J. Wong { 9322aa3048eSChristoph Hellwig struct iomap_iter iter = { 9332aa3048eSChristoph Hellwig .inode = inode, 9342aa3048eSChristoph Hellwig .pos = pos, 9352aa3048eSChristoph Hellwig .len = len, 9362aa3048eSChristoph Hellwig .flags = IOMAP_ZERO, 9372aa3048eSChristoph Hellwig }; 9382aa3048eSChristoph Hellwig int ret; 939afc51aaaSDarrick J. Wong 9402aa3048eSChristoph Hellwig while ((ret = iomap_iter(&iter, ops)) > 0) 9412aa3048eSChristoph Hellwig iter.processed = iomap_zero_iter(&iter, did_zero); 942afc51aaaSDarrick J. Wong return ret; 943afc51aaaSDarrick J. Wong } 944afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_zero_range); 945afc51aaaSDarrick J. Wong 946afc51aaaSDarrick J. Wong int 947afc51aaaSDarrick J. Wong iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, 948afc51aaaSDarrick J. Wong const struct iomap_ops *ops) 949afc51aaaSDarrick J. Wong { 950afc51aaaSDarrick J. Wong unsigned int blocksize = i_blocksize(inode); 951afc51aaaSDarrick J. Wong unsigned int off = pos & (blocksize - 1); 952afc51aaaSDarrick J. Wong 953afc51aaaSDarrick J. Wong /* Block boundary? Nothing to do */ 954afc51aaaSDarrick J. Wong if (!off) 955afc51aaaSDarrick J. Wong return 0; 956afc51aaaSDarrick J. Wong return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops); 957afc51aaaSDarrick J. Wong } 958afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_truncate_page); 959afc51aaaSDarrick J. Wong 960ea0f843aSMatthew Wilcox (Oracle) static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter, 961ea0f843aSMatthew Wilcox (Oracle) struct folio *folio) 962afc51aaaSDarrick J. Wong { 963253564baSChristoph Hellwig loff_t length = iomap_length(iter); 964afc51aaaSDarrick J. Wong int ret; 965afc51aaaSDarrick J. Wong 966253564baSChristoph Hellwig if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) { 967d1bd0b4eSMatthew Wilcox (Oracle) ret = __block_write_begin_int(folio, iter->pos, length, NULL, 968253564baSChristoph Hellwig &iter->iomap); 969afc51aaaSDarrick J. Wong if (ret) 970afc51aaaSDarrick J. 
Wong return ret; 971ea0f843aSMatthew Wilcox (Oracle) block_commit_write(&folio->page, 0, length); 972afc51aaaSDarrick J. Wong } else { 973ea0f843aSMatthew Wilcox (Oracle) WARN_ON_ONCE(!folio_test_uptodate(folio)); 974ea0f843aSMatthew Wilcox (Oracle) folio_mark_dirty(folio); 975afc51aaaSDarrick J. Wong } 976afc51aaaSDarrick J. Wong 977afc51aaaSDarrick J. Wong return length; 978afc51aaaSDarrick J. Wong } 979afc51aaaSDarrick J. Wong 980afc51aaaSDarrick J. Wong vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) 981afc51aaaSDarrick J. Wong { 982253564baSChristoph Hellwig struct iomap_iter iter = { 983253564baSChristoph Hellwig .inode = file_inode(vmf->vma->vm_file), 984253564baSChristoph Hellwig .flags = IOMAP_WRITE | IOMAP_FAULT, 985253564baSChristoph Hellwig }; 986ea0f843aSMatthew Wilcox (Oracle) struct folio *folio = page_folio(vmf->page); 987afc51aaaSDarrick J. Wong ssize_t ret; 988afc51aaaSDarrick J. Wong 989ea0f843aSMatthew Wilcox (Oracle) folio_lock(folio); 990ea0f843aSMatthew Wilcox (Oracle) ret = folio_mkwrite_check_truncate(folio, iter.inode); 991243145bcSAndreas Gruenbacher if (ret < 0) 992afc51aaaSDarrick J. Wong goto out_unlock; 993ea0f843aSMatthew Wilcox (Oracle) iter.pos = folio_pos(folio); 994253564baSChristoph Hellwig iter.len = ret; 995253564baSChristoph Hellwig while ((ret = iomap_iter(&iter, ops)) > 0) 996ea0f843aSMatthew Wilcox (Oracle) iter.processed = iomap_folio_mkwrite_iter(&iter, folio); 997afc51aaaSDarrick J. Wong 998253564baSChristoph Hellwig if (ret < 0) 999afc51aaaSDarrick J. Wong goto out_unlock; 1000ea0f843aSMatthew Wilcox (Oracle) folio_wait_stable(folio); 1001afc51aaaSDarrick J. Wong return VM_FAULT_LOCKED; 1002afc51aaaSDarrick J. Wong out_unlock: 1003ea0f843aSMatthew Wilcox (Oracle) folio_unlock(folio); 1004afc51aaaSDarrick J. Wong return block_page_mkwrite_return(ret); 1005afc51aaaSDarrick J. Wong } 1006afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_page_mkwrite); 1007598ecfbaSChristoph Hellwig 10088ffd74e9SMatthew Wilcox (Oracle) static void iomap_finish_folio_write(struct inode *inode, struct folio *folio, 10098ffd74e9SMatthew Wilcox (Oracle) size_t len, int error) 1010598ecfbaSChristoph Hellwig { 101195c4cd05SMatthew Wilcox (Oracle) struct iomap_page *iop = to_iomap_page(folio); 1012598ecfbaSChristoph Hellwig 1013598ecfbaSChristoph Hellwig if (error) { 10148ffd74e9SMatthew Wilcox (Oracle) folio_set_error(folio); 1015b69eea82SDarrick J. Wong mapping_set_error(inode->i_mapping, error); 1016598ecfbaSChristoph Hellwig } 1017598ecfbaSChristoph Hellwig 10188ffd74e9SMatthew Wilcox (Oracle) WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop); 10190fb2d720SMatthew Wilcox (Oracle) WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0); 1020598ecfbaSChristoph Hellwig 10210fb2d720SMatthew Wilcox (Oracle) if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending)) 10228ffd74e9SMatthew Wilcox (Oracle) folio_end_writeback(folio); 1023598ecfbaSChristoph Hellwig } 1024598ecfbaSChristoph Hellwig 1025598ecfbaSChristoph Hellwig /* 1026598ecfbaSChristoph Hellwig * We're now finished for good with this ioend structure. Update the page 1027598ecfbaSChristoph Hellwig * state, release holds on bios, and finally free up memory. Do not use the 1028598ecfbaSChristoph Hellwig * ioend after this. 
1029598ecfbaSChristoph Hellwig */ 1030ebb7fb15SDave Chinner static u32 1031598ecfbaSChristoph Hellwig iomap_finish_ioend(struct iomap_ioend *ioend, int error) 1032598ecfbaSChristoph Hellwig { 1033598ecfbaSChristoph Hellwig struct inode *inode = ioend->io_inode; 1034598ecfbaSChristoph Hellwig struct bio *bio = &ioend->io_inline_bio; 1035598ecfbaSChristoph Hellwig struct bio *last = ioend->io_bio, *next; 1036598ecfbaSChristoph Hellwig u64 start = bio->bi_iter.bi_sector; 1037c275779fSZorro Lang loff_t offset = ioend->io_offset; 1038598ecfbaSChristoph Hellwig bool quiet = bio_flagged(bio, BIO_QUIET); 1039ebb7fb15SDave Chinner u32 folio_count = 0; 1040598ecfbaSChristoph Hellwig 1041598ecfbaSChristoph Hellwig for (bio = &ioend->io_inline_bio; bio; bio = next) { 10428ffd74e9SMatthew Wilcox (Oracle) struct folio_iter fi; 1043598ecfbaSChristoph Hellwig 1044598ecfbaSChristoph Hellwig /* 1045598ecfbaSChristoph Hellwig * For the last bio, bi_private points to the ioend, so we 1046598ecfbaSChristoph Hellwig * need to explicitly end the iteration here. 1047598ecfbaSChristoph Hellwig */ 1048598ecfbaSChristoph Hellwig if (bio == last) 1049598ecfbaSChristoph Hellwig next = NULL; 1050598ecfbaSChristoph Hellwig else 1051598ecfbaSChristoph Hellwig next = bio->bi_private; 1052598ecfbaSChristoph Hellwig 10538ffd74e9SMatthew Wilcox (Oracle) /* walk all folios in bio, ending page IO on them */ 1054ebb7fb15SDave Chinner bio_for_each_folio_all(fi, bio) { 10558ffd74e9SMatthew Wilcox (Oracle) iomap_finish_folio_write(inode, fi.folio, fi.length, 10568ffd74e9SMatthew Wilcox (Oracle) error); 1057ebb7fb15SDave Chinner folio_count++; 1058ebb7fb15SDave Chinner } 1059598ecfbaSChristoph Hellwig bio_put(bio); 1060598ecfbaSChristoph Hellwig } 1061c275779fSZorro Lang /* The ioend has been freed by bio_put() */ 1062598ecfbaSChristoph Hellwig 1063598ecfbaSChristoph Hellwig if (unlikely(error && !quiet)) { 1064598ecfbaSChristoph Hellwig printk_ratelimited(KERN_ERR 10659cd0ed63SDarrick J. Wong "%s: writeback error on inode %lu, offset %lld, sector %llu", 1066c275779fSZorro Lang inode->i_sb->s_id, inode->i_ino, offset, start); 1067598ecfbaSChristoph Hellwig } 1068ebb7fb15SDave Chinner return folio_count; 1069598ecfbaSChristoph Hellwig } 1070598ecfbaSChristoph Hellwig 1071ebb7fb15SDave Chinner /* 1072ebb7fb15SDave Chinner * Ioend completion routine for merged bios. This can only be called from task 1073ebb7fb15SDave Chinner * contexts as merged ioends can be of unbound length. Hence we have to break up 1074ebb7fb15SDave Chinner * the writeback completions into manageable chunks to avoid long scheduler 1075ebb7fb15SDave Chinner * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get 1076ebb7fb15SDave Chinner * good batch processing throughput without creating adverse scheduler latency 1077ebb7fb15SDave Chinner * conditions. 
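 *
 * Worked example: with IOEND_BATCH_SIZE at 4096, the loop below calls
 * cond_resched() once roughly IOEND_BATCH_SIZE * 8 = 32768 folio
 * completions have been counted since the last reschedule point.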
1078ebb7fb15SDave Chinner */ 1079598ecfbaSChristoph Hellwig void 1080598ecfbaSChristoph Hellwig iomap_finish_ioends(struct iomap_ioend *ioend, int error) 1081598ecfbaSChristoph Hellwig { 1082598ecfbaSChristoph Hellwig struct list_head tmp; 1083ebb7fb15SDave Chinner u32 completions; 1084ebb7fb15SDave Chinner 1085ebb7fb15SDave Chinner might_sleep(); 1086598ecfbaSChristoph Hellwig 1087598ecfbaSChristoph Hellwig list_replace_init(&ioend->io_list, &tmp); 1088ebb7fb15SDave Chinner completions = iomap_finish_ioend(ioend, error); 1089598ecfbaSChristoph Hellwig 1090598ecfbaSChristoph Hellwig while (!list_empty(&tmp)) { 1091ebb7fb15SDave Chinner if (completions > IOEND_BATCH_SIZE * 8) { 1092ebb7fb15SDave Chinner cond_resched(); 1093ebb7fb15SDave Chinner completions = 0; 1094ebb7fb15SDave Chinner } 1095598ecfbaSChristoph Hellwig ioend = list_first_entry(&tmp, struct iomap_ioend, io_list); 1096598ecfbaSChristoph Hellwig list_del_init(&ioend->io_list); 1097ebb7fb15SDave Chinner completions += iomap_finish_ioend(ioend, error); 1098598ecfbaSChristoph Hellwig } 1099598ecfbaSChristoph Hellwig } 1100598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_finish_ioends); 1101598ecfbaSChristoph Hellwig 1102598ecfbaSChristoph Hellwig /* 1103598ecfbaSChristoph Hellwig * We can merge two adjacent ioends if they have the same set of work to do. 1104598ecfbaSChristoph Hellwig */ 1105598ecfbaSChristoph Hellwig static bool 1106598ecfbaSChristoph Hellwig iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next) 1107598ecfbaSChristoph Hellwig { 1108598ecfbaSChristoph Hellwig if (ioend->io_bio->bi_status != next->io_bio->bi_status) 1109598ecfbaSChristoph Hellwig return false; 1110598ecfbaSChristoph Hellwig if ((ioend->io_flags & IOMAP_F_SHARED) ^ 1111598ecfbaSChristoph Hellwig (next->io_flags & IOMAP_F_SHARED)) 1112598ecfbaSChristoph Hellwig return false; 1113598ecfbaSChristoph Hellwig if ((ioend->io_type == IOMAP_UNWRITTEN) ^ 1114598ecfbaSChristoph Hellwig (next->io_type == IOMAP_UNWRITTEN)) 1115598ecfbaSChristoph Hellwig return false; 1116598ecfbaSChristoph Hellwig if (ioend->io_offset + ioend->io_size != next->io_offset) 1117598ecfbaSChristoph Hellwig return false; 1118ebb7fb15SDave Chinner /* 1119ebb7fb15SDave Chinner * Do not merge physically discontiguous ioends. The filesystem 1120ebb7fb15SDave Chinner * completion functions will have to iterate the physical 1121ebb7fb15SDave Chinner * discontiguities even if we merge the ioends at a logical level, so 1122ebb7fb15SDave Chinner * we don't gain anything by merging physical discontiguities here. 1123ebb7fb15SDave Chinner * 1124ebb7fb15SDave Chinner * We cannot use bio->bi_iter.bi_sector here as it is modified during 1125ebb7fb15SDave Chinner * submission so does not point to the start sector of the bio at 1126ebb7fb15SDave Chinner * completion. 
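 *
 * Worked example: an ioend with io_size of 1 MiB spans 1 MiB >> 9 = 2048
 * sectors, so the next ioend merges only if next->io_sector equals
 * ioend->io_sector + 2048 (the logical offsets having already matched).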
1127ebb7fb15SDave Chinner */ 1128ebb7fb15SDave Chinner if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector) 1129ebb7fb15SDave Chinner return false; 1130598ecfbaSChristoph Hellwig return true; 1131598ecfbaSChristoph Hellwig } 1132598ecfbaSChristoph Hellwig 1133598ecfbaSChristoph Hellwig void 11346e552494SBrian Foster iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends) 1135598ecfbaSChristoph Hellwig { 1136598ecfbaSChristoph Hellwig struct iomap_ioend *next; 1137598ecfbaSChristoph Hellwig 1138598ecfbaSChristoph Hellwig INIT_LIST_HEAD(&ioend->io_list); 1139598ecfbaSChristoph Hellwig 1140598ecfbaSChristoph Hellwig while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend, 1141598ecfbaSChristoph Hellwig io_list))) { 1142598ecfbaSChristoph Hellwig if (!iomap_ioend_can_merge(ioend, next)) 1143598ecfbaSChristoph Hellwig break; 1144598ecfbaSChristoph Hellwig list_move_tail(&next->io_list, &ioend->io_list); 1145598ecfbaSChristoph Hellwig ioend->io_size += next->io_size; 1146598ecfbaSChristoph Hellwig } 1147598ecfbaSChristoph Hellwig } 1148598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_ioend_try_merge); 1149598ecfbaSChristoph Hellwig 1150598ecfbaSChristoph Hellwig static int 11514f0f586bSSami Tolvanen iomap_ioend_compare(void *priv, const struct list_head *a, 11524f0f586bSSami Tolvanen const struct list_head *b) 1153598ecfbaSChristoph Hellwig { 1154b3d423ecSChristoph Hellwig struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list); 1155b3d423ecSChristoph Hellwig struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list); 1156598ecfbaSChristoph Hellwig 1157598ecfbaSChristoph Hellwig if (ia->io_offset < ib->io_offset) 1158598ecfbaSChristoph Hellwig return -1; 1159b3d423ecSChristoph Hellwig if (ia->io_offset > ib->io_offset) 1160598ecfbaSChristoph Hellwig return 1; 1161598ecfbaSChristoph Hellwig return 0; 1162598ecfbaSChristoph Hellwig } 1163598ecfbaSChristoph Hellwig 1164598ecfbaSChristoph Hellwig void 1165598ecfbaSChristoph Hellwig iomap_sort_ioends(struct list_head *ioend_list) 1166598ecfbaSChristoph Hellwig { 1167598ecfbaSChristoph Hellwig list_sort(NULL, ioend_list, iomap_ioend_compare); 1168598ecfbaSChristoph Hellwig } 1169598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_sort_ioends); 1170598ecfbaSChristoph Hellwig 1171598ecfbaSChristoph Hellwig static void iomap_writepage_end_bio(struct bio *bio) 1172598ecfbaSChristoph Hellwig { 1173598ecfbaSChristoph Hellwig struct iomap_ioend *ioend = bio->bi_private; 1174598ecfbaSChristoph Hellwig 1175598ecfbaSChristoph Hellwig iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status)); 1176598ecfbaSChristoph Hellwig } 1177598ecfbaSChristoph Hellwig 1178598ecfbaSChristoph Hellwig /* 1179598ecfbaSChristoph Hellwig * Submit the final bio for an ioend. 1180598ecfbaSChristoph Hellwig * 1181598ecfbaSChristoph Hellwig * If @error is non-zero, it means that we have a situation where some part of 1182f1f264b4SAndreas Gruenbacher * the submission process has failed after we've marked pages for writeback 1183598ecfbaSChristoph Hellwig * and unlocked them. In this situation, we need to fail the bio instead of 1184598ecfbaSChristoph Hellwig * submitting it. This typically only happens on a filesystem shutdown. 
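 *
 * If the filesystem supplied ->prepare_ioend, it runs first and may both
 * adjust the ioend and report its own error.  A hypothetical hook sketch
 * ("myfs" names are illustrative) that diverts completion into the
 * filesystem's own bi_end_io when extra completion work will be needed:
 *
 *	static int myfs_prepare_ioend(struct iomap_ioend *ioend, int status)
 *	{
 *		if (ioend->io_type == IOMAP_UNWRITTEN ||
 *		    (ioend->io_flags & IOMAP_F_SHARED))
 *			ioend->io_bio->bi_end_io = myfs_writeback_end_bio;
 *		return status;
 *	}
 *
 * On the failure path below, bio_endio() runs the ioend's completion
 * immediately (by default via iomap_writepage_end_bio()), which marks the
 * folios and the mapping with the error.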
1185598ecfbaSChristoph Hellwig */ 1186598ecfbaSChristoph Hellwig static int 1187598ecfbaSChristoph Hellwig iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend, 1188598ecfbaSChristoph Hellwig int error) 1189598ecfbaSChristoph Hellwig { 1190598ecfbaSChristoph Hellwig ioend->io_bio->bi_private = ioend; 1191598ecfbaSChristoph Hellwig ioend->io_bio->bi_end_io = iomap_writepage_end_bio; 1192598ecfbaSChristoph Hellwig 1193598ecfbaSChristoph Hellwig if (wpc->ops->prepare_ioend) 1194598ecfbaSChristoph Hellwig error = wpc->ops->prepare_ioend(ioend, error); 1195598ecfbaSChristoph Hellwig if (error) { 1196598ecfbaSChristoph Hellwig /* 1197f1f264b4SAndreas Gruenbacher * If we're failing the IO now, just mark the ioend with an 1198598ecfbaSChristoph Hellwig * error and finish it. This will run IO completion immediately 1199598ecfbaSChristoph Hellwig * as there is only one reference to the ioend at this point in 1200598ecfbaSChristoph Hellwig * time. 1201598ecfbaSChristoph Hellwig */ 1202598ecfbaSChristoph Hellwig ioend->io_bio->bi_status = errno_to_blk_status(error); 1203598ecfbaSChristoph Hellwig bio_endio(ioend->io_bio); 1204598ecfbaSChristoph Hellwig return error; 1205598ecfbaSChristoph Hellwig } 1206598ecfbaSChristoph Hellwig 1207598ecfbaSChristoph Hellwig submit_bio(ioend->io_bio); 1208598ecfbaSChristoph Hellwig return 0; 1209598ecfbaSChristoph Hellwig } 1210598ecfbaSChristoph Hellwig 1211598ecfbaSChristoph Hellwig static struct iomap_ioend * 1212598ecfbaSChristoph Hellwig iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, 1213598ecfbaSChristoph Hellwig loff_t offset, sector_t sector, struct writeback_control *wbc) 1214598ecfbaSChristoph Hellwig { 1215598ecfbaSChristoph Hellwig struct iomap_ioend *ioend; 1216598ecfbaSChristoph Hellwig struct bio *bio; 1217598ecfbaSChristoph Hellwig 1218609be106SChristoph Hellwig bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS, 1219609be106SChristoph Hellwig REQ_OP_WRITE | wbc_to_write_flags(wbc), 1220609be106SChristoph Hellwig GFP_NOFS, &iomap_ioend_bioset); 1221598ecfbaSChristoph Hellwig bio->bi_iter.bi_sector = sector; 1222598ecfbaSChristoph Hellwig wbc_init_bio(wbc, bio); 1223598ecfbaSChristoph Hellwig 1224598ecfbaSChristoph Hellwig ioend = container_of(bio, struct iomap_ioend, io_inline_bio); 1225598ecfbaSChristoph Hellwig INIT_LIST_HEAD(&ioend->io_list); 1226598ecfbaSChristoph Hellwig ioend->io_type = wpc->iomap.type; 1227598ecfbaSChristoph Hellwig ioend->io_flags = wpc->iomap.flags; 1228598ecfbaSChristoph Hellwig ioend->io_inode = inode; 1229598ecfbaSChristoph Hellwig ioend->io_size = 0; 1230ebb7fb15SDave Chinner ioend->io_folios = 0; 1231598ecfbaSChristoph Hellwig ioend->io_offset = offset; 1232598ecfbaSChristoph Hellwig ioend->io_bio = bio; 1233ebb7fb15SDave Chinner ioend->io_sector = sector; 1234598ecfbaSChristoph Hellwig return ioend; 1235598ecfbaSChristoph Hellwig } 1236598ecfbaSChristoph Hellwig 1237598ecfbaSChristoph Hellwig /* 1238598ecfbaSChristoph Hellwig * Allocate a new bio, and chain the old bio to the new one. 1239598ecfbaSChristoph Hellwig * 1240f1f264b4SAndreas Gruenbacher * Note that we have to perform the chaining in this unintuitive order 1241598ecfbaSChristoph Hellwig * so that the bi_private linkage is set up in the right direction for the 1242598ecfbaSChristoph Hellwig * traversal in iomap_finish_ioend(). 
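 *
 * Concretely: bio_chain() makes each earlier bio's bi_private point at
 * the bio that follows it, while iomap_submit_ioend() points the final
 * bio's bi_private back at the ioend, so iomap_finish_ioend() can walk
 * forward from io_inline_bio and stop at io_bio.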
1243598ecfbaSChristoph Hellwig */ 1244598ecfbaSChristoph Hellwig static struct bio * 1245598ecfbaSChristoph Hellwig iomap_chain_bio(struct bio *prev) 1246598ecfbaSChristoph Hellwig { 1247598ecfbaSChristoph Hellwig struct bio *new; 1248598ecfbaSChristoph Hellwig 124907888c66SChristoph Hellwig new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS); 125007888c66SChristoph Hellwig bio_clone_blkg_association(new, prev); 1251598ecfbaSChristoph Hellwig new->bi_iter.bi_sector = bio_end_sector(prev); 1252598ecfbaSChristoph Hellwig 1253598ecfbaSChristoph Hellwig bio_chain(prev, new); 1254598ecfbaSChristoph Hellwig bio_get(prev); /* for iomap_finish_ioend */ 1255598ecfbaSChristoph Hellwig submit_bio(prev); 1256598ecfbaSChristoph Hellwig return new; 1257598ecfbaSChristoph Hellwig } 1258598ecfbaSChristoph Hellwig 1259598ecfbaSChristoph Hellwig static bool 1260598ecfbaSChristoph Hellwig iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset, 1261598ecfbaSChristoph Hellwig sector_t sector) 1262598ecfbaSChristoph Hellwig { 1263598ecfbaSChristoph Hellwig if ((wpc->iomap.flags & IOMAP_F_SHARED) != 1264598ecfbaSChristoph Hellwig (wpc->ioend->io_flags & IOMAP_F_SHARED)) 1265598ecfbaSChristoph Hellwig return false; 1266598ecfbaSChristoph Hellwig if (wpc->iomap.type != wpc->ioend->io_type) 1267598ecfbaSChristoph Hellwig return false; 1268598ecfbaSChristoph Hellwig if (offset != wpc->ioend->io_offset + wpc->ioend->io_size) 1269598ecfbaSChristoph Hellwig return false; 1270598ecfbaSChristoph Hellwig if (sector != bio_end_sector(wpc->ioend->io_bio)) 1271598ecfbaSChristoph Hellwig return false; 1272ebb7fb15SDave Chinner /* 1273ebb7fb15SDave Chinner * Limit ioend bio chain lengths to minimise IO completion latency. This 1274ebb7fb15SDave Chinner * also prevents long tight loops ending page writeback on all the 1275ebb7fb15SDave Chinner * folios in the ioend. 1276ebb7fb15SDave Chinner */ 1277ebb7fb15SDave Chinner if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE) 1278ebb7fb15SDave Chinner return false; 1279598ecfbaSChristoph Hellwig return true; 1280598ecfbaSChristoph Hellwig } 1281598ecfbaSChristoph Hellwig 1282598ecfbaSChristoph Hellwig /* 1283598ecfbaSChristoph Hellwig * Test to see if we have an existing ioend structure that we could append to 1284f1f264b4SAndreas Gruenbacher * first; otherwise finish off the current ioend and start another. 
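 *
 * Worked example with 4 KiB blocks: an ioend covering file offsets
 * 0..8191 can take the dirty block at offset 8192 only if that block's
 * sector equals bio_end_sector() of the current bio and the type and
 * shared flags match; a gap in either the file offset or the disk
 * address, or an ioend already holding IOEND_BATCH_SIZE folios, starts a
 * new ioend instead.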
1285598ecfbaSChristoph Hellwig */ 1286598ecfbaSChristoph Hellwig static void 1287e735c007SMatthew Wilcox (Oracle) iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio, 1288598ecfbaSChristoph Hellwig struct iomap_page *iop, struct iomap_writepage_ctx *wpc, 1289598ecfbaSChristoph Hellwig struct writeback_control *wbc, struct list_head *iolist) 1290598ecfbaSChristoph Hellwig { 1291e735c007SMatthew Wilcox (Oracle) sector_t sector = iomap_sector(&wpc->iomap, pos); 1292598ecfbaSChristoph Hellwig unsigned len = i_blocksize(inode); 1293e735c007SMatthew Wilcox (Oracle) size_t poff = offset_in_folio(folio, pos); 1294598ecfbaSChristoph Hellwig 1295e735c007SMatthew Wilcox (Oracle) if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) { 1296598ecfbaSChristoph Hellwig if (wpc->ioend) 1297598ecfbaSChristoph Hellwig list_add(&wpc->ioend->io_list, iolist); 1298e735c007SMatthew Wilcox (Oracle) wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc); 1299598ecfbaSChristoph Hellwig } 1300598ecfbaSChristoph Hellwig 1301e735c007SMatthew Wilcox (Oracle) if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) { 1302c1b79f11SChristoph Hellwig wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio); 1303e735c007SMatthew Wilcox (Oracle) bio_add_folio(wpc->ioend->io_bio, folio, len, poff); 1304c1b79f11SChristoph Hellwig } 1305c1b79f11SChristoph Hellwig 13060fb2d720SMatthew Wilcox (Oracle) if (iop) 13070fb2d720SMatthew Wilcox (Oracle) atomic_add(len, &iop->write_bytes_pending); 1308598ecfbaSChristoph Hellwig wpc->ioend->io_size += len; 1309e735c007SMatthew Wilcox (Oracle) wbc_account_cgroup_owner(wbc, &folio->page, len); 1310598ecfbaSChristoph Hellwig } 1311598ecfbaSChristoph Hellwig 1312598ecfbaSChristoph Hellwig /* 1313598ecfbaSChristoph Hellwig * We implement an immediate ioend submission policy here to avoid needing to 1314598ecfbaSChristoph Hellwig * chain multiple ioends and hence nest mempool allocations which can violate 1315f1f264b4SAndreas Gruenbacher * the forward progress guarantees we need to provide. The current ioend we're 1316f1f264b4SAndreas Gruenbacher * adding blocks to is cached in the writepage context, and if the new block 1317f1f264b4SAndreas Gruenbacher * doesn't append to the cached ioend, it will create a new ioend and cache that 1318598ecfbaSChristoph Hellwig * instead. 1319598ecfbaSChristoph Hellwig * 1320598ecfbaSChristoph Hellwig * If a new ioend is created and cached, the old ioend is returned and queued 1321598ecfbaSChristoph Hellwig * locally for submission once the entire page is processed or an error has been 1322598ecfbaSChristoph Hellwig * detected. While ioends are submitted immediately after they are completed, 1323598ecfbaSChristoph Hellwig * batching optimisations are provided by higher level block plugging. 1324598ecfbaSChristoph Hellwig * 1325598ecfbaSChristoph Hellwig * At the end of a writeback pass, there will be a cached ioend remaining on the 1326598ecfbaSChristoph Hellwig * writepage context that the caller will need to submit. 
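 *
 * iomap_writepage() and iomap_writepages() below perform that final
 * submission through iomap_submit_ioend() once the page walk returns.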
1327598ecfbaSChristoph Hellwig */ 1328598ecfbaSChristoph Hellwig static int 1329598ecfbaSChristoph Hellwig iomap_writepage_map(struct iomap_writepage_ctx *wpc, 1330598ecfbaSChristoph Hellwig struct writeback_control *wbc, struct inode *inode, 1331e735c007SMatthew Wilcox (Oracle) struct folio *folio, u64 end_pos) 1332598ecfbaSChristoph Hellwig { 1333435d44b3SMatthew Wilcox (Oracle) struct iomap_page *iop = iomap_page_create(inode, folio); 1334598ecfbaSChristoph Hellwig struct iomap_ioend *ioend, *next; 1335598ecfbaSChristoph Hellwig unsigned len = i_blocksize(inode); 133692655036SMatthew Wilcox (Oracle) unsigned nblocks = i_blocks_per_folio(inode, folio); 133792655036SMatthew Wilcox (Oracle) u64 pos = folio_pos(folio); 1338598ecfbaSChristoph Hellwig int error = 0, count = 0, i; 1339598ecfbaSChristoph Hellwig LIST_HEAD(submit_list); 1340598ecfbaSChristoph Hellwig 13410fb2d720SMatthew Wilcox (Oracle) WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0); 1342598ecfbaSChristoph Hellwig 1343598ecfbaSChristoph Hellwig /* 134492655036SMatthew Wilcox (Oracle) * Walk through the folio to find areas to write back. If we 134592655036SMatthew Wilcox (Oracle) * run off the end of the current map or find the current map 134692655036SMatthew Wilcox (Oracle) * invalid, grab a new one. 1347598ecfbaSChristoph Hellwig */ 134892655036SMatthew Wilcox (Oracle) for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) { 1349598ecfbaSChristoph Hellwig if (iop && !test_bit(i, iop->uptodate)) 1350598ecfbaSChristoph Hellwig continue; 1351598ecfbaSChristoph Hellwig 135292655036SMatthew Wilcox (Oracle) error = wpc->ops->map_blocks(wpc, inode, pos); 1353598ecfbaSChristoph Hellwig if (error) 1354598ecfbaSChristoph Hellwig break; 13553e19e6f3SChristoph Hellwig if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE)) 13563e19e6f3SChristoph Hellwig continue; 1357598ecfbaSChristoph Hellwig if (wpc->iomap.type == IOMAP_HOLE) 1358598ecfbaSChristoph Hellwig continue; 1359e735c007SMatthew Wilcox (Oracle) iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc, 1360598ecfbaSChristoph Hellwig &submit_list); 1361598ecfbaSChristoph Hellwig count++; 1362598ecfbaSChristoph Hellwig } 1363ebb7fb15SDave Chinner if (count) 1364ebb7fb15SDave Chinner wpc->ioend->io_folios++; 1365598ecfbaSChristoph Hellwig 1366598ecfbaSChristoph Hellwig WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list)); 1367e735c007SMatthew Wilcox (Oracle) WARN_ON_ONCE(!folio_test_locked(folio)); 1368e735c007SMatthew Wilcox (Oracle) WARN_ON_ONCE(folio_test_writeback(folio)); 1369e735c007SMatthew Wilcox (Oracle) WARN_ON_ONCE(folio_test_dirty(folio)); 1370598ecfbaSChristoph Hellwig 1371598ecfbaSChristoph Hellwig /* 1372598ecfbaSChristoph Hellwig * We cannot cancel the ioend directly here on error. We may have 1373598ecfbaSChristoph Hellwig * already set other pages under writeback and hence we have to run I/O 1374598ecfbaSChristoph Hellwig * completion to mark the error state of the pages under writeback 1375598ecfbaSChristoph Hellwig * appropriately. 1376598ecfbaSChristoph Hellwig */ 1377598ecfbaSChristoph Hellwig if (unlikely(error)) { 1378598ecfbaSChristoph Hellwig /* 1379763e4cdcSBrian Foster * Let the filesystem know what portion of the current page 1380f1f264b4SAndreas Gruenbacher * failed to map. If the page hasn't been added to ioend, it 1381763e4cdcSBrian Foster * won't be affected by I/O completion and we must unlock it 1382763e4cdcSBrian Foster * now. 
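 * The ->discard_folio call gives the filesystem a chance to release any
 * resources it set up for the failed range, e.g. punching out delalloc
 * reservations that will now never reach a bio.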
1383598ecfbaSChristoph Hellwig */ 13846e478521SMatthew Wilcox (Oracle) if (wpc->ops->discard_folio) 138592655036SMatthew Wilcox (Oracle) wpc->ops->discard_folio(folio, pos); 1386763e4cdcSBrian Foster if (!count) { 1387e735c007SMatthew Wilcox (Oracle) folio_clear_uptodate(folio); 1388e735c007SMatthew Wilcox (Oracle) folio_unlock(folio); 1389598ecfbaSChristoph Hellwig goto done; 1390598ecfbaSChristoph Hellwig } 1391598ecfbaSChristoph Hellwig } 1392598ecfbaSChristoph Hellwig 1393e735c007SMatthew Wilcox (Oracle) folio_start_writeback(folio); 1394e735c007SMatthew Wilcox (Oracle) folio_unlock(folio); 1395598ecfbaSChristoph Hellwig 1396598ecfbaSChristoph Hellwig /* 1397f1f264b4SAndreas Gruenbacher * Preserve the original error if there was one; catch 1398598ecfbaSChristoph Hellwig * submission errors here and propagate into subsequent ioend 1399598ecfbaSChristoph Hellwig * submissions. 1400598ecfbaSChristoph Hellwig */ 1401598ecfbaSChristoph Hellwig list_for_each_entry_safe(ioend, next, &submit_list, io_list) { 1402598ecfbaSChristoph Hellwig int error2; 1403598ecfbaSChristoph Hellwig 1404598ecfbaSChristoph Hellwig list_del_init(&ioend->io_list); 1405598ecfbaSChristoph Hellwig error2 = iomap_submit_ioend(wpc, ioend, error); 1406598ecfbaSChristoph Hellwig if (error2 && !error) 1407598ecfbaSChristoph Hellwig error = error2; 1408598ecfbaSChristoph Hellwig } 1409598ecfbaSChristoph Hellwig 1410598ecfbaSChristoph Hellwig /* 1411598ecfbaSChristoph Hellwig * We can end up here with no error and nothing to write only if we race 1412598ecfbaSChristoph Hellwig * with a partial page truncate on a sub-page block sized filesystem. 1413598ecfbaSChristoph Hellwig */ 1414598ecfbaSChristoph Hellwig if (!count) 1415e735c007SMatthew Wilcox (Oracle) folio_end_writeback(folio); 1416598ecfbaSChristoph Hellwig done: 1417e735c007SMatthew Wilcox (Oracle) mapping_set_error(folio->mapping, error); 1418598ecfbaSChristoph Hellwig return error; 1419598ecfbaSChristoph Hellwig } 1420598ecfbaSChristoph Hellwig 1421598ecfbaSChristoph Hellwig /* 1422598ecfbaSChristoph Hellwig * Write out a dirty page. 1423598ecfbaSChristoph Hellwig * 1424f1f264b4SAndreas Gruenbacher * For delalloc space on the page, we need to allocate space and flush it. 1425f1f264b4SAndreas Gruenbacher * For unwritten space on the page, we need to start the conversion to 1426598ecfbaSChristoph Hellwig * regular allocated space. 1427598ecfbaSChristoph Hellwig */ 1428598ecfbaSChristoph Hellwig static int 1429598ecfbaSChristoph Hellwig iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data) 1430598ecfbaSChristoph Hellwig { 1431e735c007SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 1432598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc = data; 1433e735c007SMatthew Wilcox (Oracle) struct inode *inode = folio->mapping->host; 143481d4782aSMatthew Wilcox (Oracle) u64 end_pos, isize; 1435598ecfbaSChristoph Hellwig 1436e735c007SMatthew Wilcox (Oracle) trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio)); 1437598ecfbaSChristoph Hellwig 1438598ecfbaSChristoph Hellwig /* 1439e735c007SMatthew Wilcox (Oracle) * Refuse to write the folio out if we're called from reclaim context. 1440598ecfbaSChristoph Hellwig * 1441598ecfbaSChristoph Hellwig * This avoids stack overflows when called from deeply used stacks in 1442598ecfbaSChristoph Hellwig * random callers for direct reclaim or memcg reclaim. We explicitly 1443598ecfbaSChristoph Hellwig * allow reclaim from kswapd as the stack usage there is relatively low. 
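 * (The check below fires only when PF_MEMALLOC is set without PF_KSWAPD,
 * i.e. for direct or memcg reclaim.)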
1444598ecfbaSChristoph Hellwig * 1445598ecfbaSChristoph Hellwig * This should never happen except in the case of a VM regression so 1446598ecfbaSChristoph Hellwig * warn about it. 1447598ecfbaSChristoph Hellwig */ 1448598ecfbaSChristoph Hellwig if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == 1449598ecfbaSChristoph Hellwig PF_MEMALLOC)) 1450598ecfbaSChristoph Hellwig goto redirty; 1451598ecfbaSChristoph Hellwig 1452598ecfbaSChristoph Hellwig /* 1453e735c007SMatthew Wilcox (Oracle) * Is this folio beyond the end of the file? 1454598ecfbaSChristoph Hellwig * 1455e735c007SMatthew Wilcox (Oracle) * The folio index is less than the end_index, adjust the end_pos 1456e735c007SMatthew Wilcox (Oracle) * to the highest offset that this folio should represent. 1457598ecfbaSChristoph Hellwig * ----------------------------------------------------- 1458598ecfbaSChristoph Hellwig * | file mapping | <EOF> | 1459598ecfbaSChristoph Hellwig * ----------------------------------------------------- 1460598ecfbaSChristoph Hellwig * | Page ... | Page N-2 | Page N-1 | Page N | | 1461598ecfbaSChristoph Hellwig * ^--------------------------------^----------|-------- 1462598ecfbaSChristoph Hellwig * | desired writeback range | see else | 1463598ecfbaSChristoph Hellwig * ---------------------------------^------------------| 1464598ecfbaSChristoph Hellwig */ 146581d4782aSMatthew Wilcox (Oracle) isize = i_size_read(inode); 1466e735c007SMatthew Wilcox (Oracle) end_pos = folio_pos(folio) + folio_size(folio); 146781d4782aSMatthew Wilcox (Oracle) if (end_pos > isize) { 1468598ecfbaSChristoph Hellwig /* 1469598ecfbaSChristoph Hellwig * Check whether the page to write out is beyond or straddles 1470598ecfbaSChristoph Hellwig * i_size or not. 1471598ecfbaSChristoph Hellwig * ------------------------------------------------------- 1472598ecfbaSChristoph Hellwig * | file mapping | <EOF> | 1473598ecfbaSChristoph Hellwig * ------------------------------------------------------- 1474598ecfbaSChristoph Hellwig * | Page ... | Page N-2 | Page N-1 | Page N | Beyond | 1475598ecfbaSChristoph Hellwig * ^--------------------------------^-----------|--------- 1476598ecfbaSChristoph Hellwig * | | Straddles | 1477598ecfbaSChristoph Hellwig * ---------------------------------^-----------|--------| 1478598ecfbaSChristoph Hellwig */ 1479e735c007SMatthew Wilcox (Oracle) size_t poff = offset_in_folio(folio, isize); 148081d4782aSMatthew Wilcox (Oracle) pgoff_t end_index = isize >> PAGE_SHIFT; 1481598ecfbaSChristoph Hellwig 1482598ecfbaSChristoph Hellwig /* 1483f1f264b4SAndreas Gruenbacher * Skip the page if it's fully outside i_size, e.g. due to a 1484f1f264b4SAndreas Gruenbacher * truncate operation that's in progress. We must redirty the 1485598ecfbaSChristoph Hellwig * page so that reclaim stops reclaiming it. Otherwise 1486598ecfbaSChristoph Hellwig * iomap_vm_releasepage() is called on it and gets confused. 1487598ecfbaSChristoph Hellwig * 1488f1f264b4SAndreas Gruenbacher * Note that the end_index is unsigned long. If the given 1489f1f264b4SAndreas Gruenbacher * offset is greater than 16TB on a 32-bit system then if we 1490f1f264b4SAndreas Gruenbacher * checked if the page is fully outside i_size with 1491f1f264b4SAndreas Gruenbacher * "if (page->index >= end_index + 1)", "end_index + 1" would 1492f1f264b4SAndreas Gruenbacher * overflow and evaluate to 0. 
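 * (Concretely: with 4 KiB pages, an i_size just below 16 TiB gives
 * end_index = 0xffffffff on a 32-bit system, and end_index + 1 wraps to
 * 0.)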
Hence this page would be 1493f1f264b4SAndreas Gruenbacher * redirtied and written out repeatedly, which would result in 1494f1f264b4SAndreas Gruenbacher * an infinite loop; the user program performing this operation 1495f1f264b4SAndreas Gruenbacher * would hang. Instead, we can detect this situation by 1496f1f264b4SAndreas Gruenbacher * checking if the page is totally beyond i_size or if its 1497598ecfbaSChristoph Hellwig * offset is just equal to the EOF. 1498598ecfbaSChristoph Hellwig */ 1499e735c007SMatthew Wilcox (Oracle) if (folio->index > end_index || 1500e735c007SMatthew Wilcox (Oracle) (folio->index == end_index && poff == 0)) 1501598ecfbaSChristoph Hellwig goto redirty; 1502598ecfbaSChristoph Hellwig 1503598ecfbaSChristoph Hellwig /* 1504598ecfbaSChristoph Hellwig * The page straddles i_size. It must be zeroed out on each 1505598ecfbaSChristoph Hellwig * and every writepage invocation because it may be mmapped. 1506598ecfbaSChristoph Hellwig * "A file is mapped in multiples of the page size. For a file 1507598ecfbaSChristoph Hellwig * that is not a multiple of the page size, the remaining 1508598ecfbaSChristoph Hellwig * memory is zeroed when mapped, and writes to that region are 1509598ecfbaSChristoph Hellwig * not written out to the file." 1510598ecfbaSChristoph Hellwig */ 1511e735c007SMatthew Wilcox (Oracle) folio_zero_segment(folio, poff, folio_size(folio)); 151281d4782aSMatthew Wilcox (Oracle) end_pos = isize; 1513598ecfbaSChristoph Hellwig } 1514598ecfbaSChristoph Hellwig 1515e735c007SMatthew Wilcox (Oracle) return iomap_writepage_map(wpc, wbc, inode, folio, end_pos); 1516598ecfbaSChristoph Hellwig 1517598ecfbaSChristoph Hellwig redirty: 1518e735c007SMatthew Wilcox (Oracle) folio_redirty_for_writepage(wbc, folio); 1519e735c007SMatthew Wilcox (Oracle) folio_unlock(folio); 1520598ecfbaSChristoph Hellwig return 0; 1521598ecfbaSChristoph Hellwig } 1522598ecfbaSChristoph Hellwig 1523598ecfbaSChristoph Hellwig int 1524598ecfbaSChristoph Hellwig iomap_writepage(struct page *page, struct writeback_control *wbc, 1525598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc, 1526598ecfbaSChristoph Hellwig const struct iomap_writeback_ops *ops) 1527598ecfbaSChristoph Hellwig { 1528598ecfbaSChristoph Hellwig int ret; 1529598ecfbaSChristoph Hellwig 1530598ecfbaSChristoph Hellwig wpc->ops = ops; 1531598ecfbaSChristoph Hellwig ret = iomap_do_writepage(page, wbc, wpc); 1532598ecfbaSChristoph Hellwig if (!wpc->ioend) 1533598ecfbaSChristoph Hellwig return ret; 1534598ecfbaSChristoph Hellwig return iomap_submit_ioend(wpc, wpc->ioend, ret); 1535598ecfbaSChristoph Hellwig } 1536598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_writepage); 1537598ecfbaSChristoph Hellwig 1538598ecfbaSChristoph Hellwig int 1539598ecfbaSChristoph Hellwig iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, 1540598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc, 1541598ecfbaSChristoph Hellwig const struct iomap_writeback_ops *ops) 1542598ecfbaSChristoph Hellwig { 1543598ecfbaSChristoph Hellwig int ret; 1544598ecfbaSChristoph Hellwig 1545598ecfbaSChristoph Hellwig wpc->ops = ops; 1546598ecfbaSChristoph Hellwig ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc); 1547598ecfbaSChristoph Hellwig if (!wpc->ioend) 1548598ecfbaSChristoph Hellwig return ret; 1549598ecfbaSChristoph Hellwig return iomap_submit_ioend(wpc, wpc->ioend, ret); 1550598ecfbaSChristoph Hellwig } 1551598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_writepages); 1552598ecfbaSChristoph Hellwig 
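/*
 * Illustrative sketch, not part of this file: a filesystem ->writepages
 * method built on iomap_writepages() above.  "myfs_writeback_ops" is a
 * hypothetical iomap_writeback_ops instance providing at least
 * ->map_blocks; real callers may also embed the context in a larger
 * filesystem-private structure.
 */
static int myfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &myfs_writeback_ops);
}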
1553598ecfbaSChristoph Hellwig static int __init iomap_init(void) 1554598ecfbaSChristoph Hellwig { 1555598ecfbaSChristoph Hellwig return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE), 1556598ecfbaSChristoph Hellwig offsetof(struct iomap_ioend, io_inline_bio), 1557598ecfbaSChristoph Hellwig BIOSET_NEED_BVECS); 1558598ecfbaSChristoph Hellwig } 1559598ecfbaSChristoph Hellwig fs_initcall(iomap_init); 1560
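/*
 * Note on the bioset sizing above: with 4 KiB pages and 512-byte sectors,
 * 4 * (PAGE_SIZE / SECTOR_SIZE) reserves 32 mempool entries, and the
 * offsetof() front pad makes every bio allocated from iomap_ioend_bioset
 * come embedded at io_inline_bio inside a struct iomap_ioend, which is
 * what lets iomap_alloc_ioend() recover the ioend with container_of().
 */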