// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

#define IOEND_BATCH_SIZE	4096

/*
 * Structure allocated for each folio to track per-block uptodate state
 * and I/O completions.
 */
struct iomap_folio_state {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		state_lock;
	unsigned long		state[];
};

static struct bio_set iomap_ioend_bioset;

static void ifs_set_range_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int first_blk = off >> inode->i_blkbits;
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_set(ifs->state, first_blk, nr_blks);
	if (bitmap_full(ifs->state, i_blocks_per_folio(inode, folio)))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_set_range_uptodate(struct folio *folio, size_t off,
		size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_set_range_uptodate(folio, ifs, off, len);
	else
		folio_mark_uptodate(folio);
}

static struct iomap_folio_state *ifs_alloc(struct inode *inode,
		struct folio *folio, unsigned int flags)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
	gfp_t gfp;

	if (ifs || nr_blocks <= 1)
		return ifs;

	if (flags & IOMAP_NOWAIT)
		gfp = GFP_NOWAIT;
	else
		gfp = GFP_NOFS | __GFP_NOFAIL;

	ifs = kzalloc(struct_size(ifs, state, BITS_TO_LONGS(nr_blocks)),
		      gfp);
	if (ifs) {
		spin_lock_init(&ifs->state_lock);
		if (folio_test_uptodate(folio))
			bitmap_fill(ifs->state, nr_blocks);
		folio_attach_private(folio, ifs);
	}
	return ifs;
}

static void ifs_free(struct folio *folio)
{
	struct iomap_folio_state *ifs = folio_detach_private(folio);
	struct inode *inode = folio->mapping->host;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (!ifs)
		return;
	WARN_ON_ONCE(atomic_read(&ifs->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(ifs->state, nr_blocks) !=
			folio_test_uptodate(folio));
	kfree(ifs);
}

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_folio_state *ifs = folio->private;
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (ifs) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, ifs->state))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, ifs->state)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct iomap_folio_state *ifs = folio->private;

	if (unlikely(error)) {
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		iomap_set_range_uptodate(folio, offset, len);
	}

	if (!ifs || atomic_sub_and_test(len, &ifs->read_bytes_pending))
		folio_unlock(folio);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}
struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	size_t offset = offset_in_folio(folio, iomap->offset);
	void *addr;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			 offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		ifs_alloc(iter->inode, folio, iter->flags);

	addr = kmap_local_folio(folio, offset);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(folio, offset, PAGE_SIZE - poff);
	return 0;
}

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct folio *folio = ctx->cur_folio;
	struct iomap_folio_state *ifs;
	loff_t orig_pos = pos;
	size_t poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, folio);

	/* zero post-eof blocks as the page may be mapped */
	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (ifs)
		atomic_add(plen, &ifs->read_bytes_pending);

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
				     REQ_OP_READ, gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!ctx->bio) {
			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
					     orig_gfp);
		}
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio_nofail(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		folio_set_error(folio);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_folio, we always
	 * return 0 and just set the folio error flag on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_read_folio);

static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);
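/*
 * Illustrative sketch only (not part of iomap): a filesystem that already
 * provides a struct iomap_ops for mapping file offsets to extents can wire
 * its read-side address_space operations straight into the helpers above.
 * "example_iomap_ops" and the example_* names below are hypothetical; XFS
 * and other iomap users follow this same pattern.
 */
#if 0	/* usage example */
extern const struct iomap_ops example_iomap_ops;	/* provided by the fs */

static int example_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &example_iomap_ops);
}

static void example_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &example_iomap_ops);
}

static const struct address_space_operations example_aops = {
	.read_folio		= example_read_folio,
	.readahead		= example_readahead,
	/* the helpers below are defined later in this file */
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
};
#endif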
/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	unsigned first, last, i;

	if (!ifs)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	for (i = first; i <= last; i++)
		if (!test_bit(i, ifs->state))
			return false;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

/**
 * iomap_get_folio - get a folio reference for writing
 * @iter: iteration structure
 * @pos: start offset of write
 * @len: Suggested size of folio to create.
 *
 * Returns a locked reference to the folio at @pos, or an error pointer if the
 * folio could not be obtained.
 */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
{
	fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;

	if (iter->flags & IOMAP_NOWAIT)
		fgp |= FGP_NOWAIT;
	fgp |= fgf_set_order(len);

	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
}
EXPORT_SYMBOL_GPL(iomap_get_folio);
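/*
 * Illustrative sketch only (not part of iomap): a filesystem that needs to
 * do its own work around folio acquisition and release (for example start
 * and end a transaction) can supply iomap_folio_ops and still call
 * iomap_get_folio() for the common path.  The example_* names below are
 * hypothetical; the hook signatures match struct iomap_folio_ops.
 */
#if 0	/* usage example */
static struct folio *example_get_folio(struct iomap_iter *iter, loff_t pos,
		unsigned len)
{
	/* fs-specific preparation (locking, reservations) would go here */
	return iomap_get_folio(iter, pos, len);
}

static void example_put_folio(struct inode *inode, loff_t pos,
		unsigned copied, struct folio *folio)
{
	folio_unlock(folio);
	folio_put(folio);
	/* fs-specific teardown would go here */
}

static const struct iomap_folio_ops example_folio_ops = {
	.get_folio	= example_get_folio,
	.put_folio	= example_put_folio,
};
#endif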
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * If the folio is dirty, we refuse to release our metadata because
	 * it may be partially dirty.  Once we track per-block dirty state,
	 * we can release the metadata if every block is dirty.
	 */
	if (folio_test_dirty(folio))
		return false;
	ifs_free(folio);
	return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
			folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		ifs_free(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
				pos + len - 1);
}

static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_add_folio_nofail(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}

static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_folio_state *ifs;
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	if (folio_test_uptodate(folio))
		return 0;
	folio_clear_error(folio);

	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
		return -EAGAIN;

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status;

			if (iter->flags & IOMAP_NOWAIT)
				return -EAGAIN;

			status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}
static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
		size_t len)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->get_folio)
		return folio_ops->get_folio(iter, pos, len);
	else
		return iomap_get_folio(iter, pos, len);
}

static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
		struct folio *folio)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->put_folio) {
		folio_ops->put_folio(iter->inode, pos, ret, folio);
	} else {
		folio_unlock(folio);
		folio_put(folio);
	}
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	folio = __iomap_get_folio(iter, pos, len);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/*
	 * Now we have a locked folio, before we do anything with it we need to
	 * check that the iomap we have cached is not stale. The inode extent
	 * mapping can change due to concurrent IO in flight (e.g.
	 * IOMAP_UNWRITTEN state can change and memory reclaim could have
	 * reclaimed a previously partially written page at this index after IO
	 * completion before this write reaches this file offset) and hence we
	 * could do the wrong thing here (zero a page range incorrectly or fail
	 * to zero) and corrupt data.
	 */
	if (folio_ops && folio_ops->iomap_valid) {
		bool iomap_valid = folio_ops->iomap_valid(iter->inode,
							&iter->iomap);
		if (!iomap_valid) {
			iter->iomap.flags |= IOMAP_F_STALE;
			status = 0;
			goto out_unlock;
		}
	}

	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	__iomap_put_folio(iter, pos, 0, folio);
	iomap_write_failed(iter->inode, pos, len);

	return status;
}
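/*
 * Illustrative sketch only (not part of iomap): one way for a filesystem to
 * implement the ->iomap_valid check used above is to stamp each mapping with
 * a sequence number in ->iomap_begin (via iomap->validity_cookie) and, once
 * the folio is locked, compare it against the current extent tree sequence.
 * The example_inode structure and field names below are hypothetical.
 */
#if 0	/* usage example */
struct example_inode {
	struct inode	vfs_inode;
	u64		i_extent_seq;	/* bumped on every extent tree change */
};

static inline struct example_inode *EXAMPLE_I(struct inode *inode)
{
	return container_of(inode, struct example_inode, vfs_inode);
}

/* ->iomap_begin would set iomap->validity_cookie = EXAMPLE_I(inode)->i_extent_seq */
static bool example_iomap_valid(struct inode *inode, const struct iomap *iomap)
{
	return iomap->validity_cookie ==
		READ_ONCE(EXAMPLE_I(inode)->i_extent_seq);
}
#endif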
static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return 0;
	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
	filemap_dirty_folio(inode->i_mapping, folio);
	return copied;
}

static size_t iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, folio, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, &folio->page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	__iomap_put_folio(iter, pos, ret, folio);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (ret < len)
		iomap_write_failed(iter->inode, pos + ret, len - ret);
	return ret;
}
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;
	struct address_space *mapping = iter->inode->i_mapping;
	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;

	do {
		struct folio *folio;
		size_t offset;		/* Offset into folio */
		size_t bytes;		/* Bytes to write to folio */
		size_t copied;		/* Bytes copied from user */

		offset = pos & (chunk - 1);
		bytes = min(chunk - offset, iov_iter_count(i));
		status = balance_dirty_pages_ratelimited_flags(mapping,
							       bdp_flags);
		if (unlikely(status))
			break;

		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * For async buffered writes the assumption is that the user
		 * page has already been faulted in. This can be optimized by
		 * faulting the user page.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			break;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		status = iomap_write_end(iter, pos, bytes, copied, folio);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			if (chunk > PAGE_SIZE)
				chunk /= 2;
		} else {
			pos += status;
			written += status;
			length -= status;
		}
	} while (iov_iter_count(i) && length);

	if (status == -EAGAIN) {
		iov_iter_revert(i, written);
		return -EAGAIN;
	}
	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
	};
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iter.flags |= IOMAP_NOWAIT;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);

	if (unlikely(iter.pos == iocb->ki_pos))
		return ret;
	ret = iter.pos - iocb->ki_pos;
	iocb->ki_pos = iter.pos;
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
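/*
 * Illustrative sketch only (not part of iomap): a filesystem's ->write_iter
 * typically takes the inode lock, runs the generic write checks and then
 * hands the iterator to iomap_file_buffered_write().  The iomap_ops name
 * below is hypothetical and error handling is simplified.
 */
#if 0	/* usage example */
extern const struct iomap_ops example_buffered_write_iomap_ops;

static ssize_t example_buffered_write_iter(struct kiocb *iocb,
		struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from,
				&example_buffered_write_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif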
/*
 * Scan the data range passed to us for dirty page cache folios. If we find a
 * dirty folio, punch out the preceding range and update the offset from which
 * the next punch will start from.
 *
 * We can punch out storage reservations under clean pages because they either
 * contain data that has been written back - in which case the delalloc punch
 * over that range is a no-op - or they have been read faulted in, in which
 * case they contain zeroes and we can remove the delalloc backing range and
 * any new writes to those pages will do the normal hole filling operation...
 *
 * This makes the logic simple: we only need to keep the delalloc extents over
 * the dirty ranges of the page cache.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 */
static int iomap_write_delalloc_scan(struct inode *inode,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		int (*punch)(struct inode *inode, loff_t offset, loff_t length))
{
	while (start_byte < end_byte) {
		struct folio	*folio;

		/* grab locked page */
		folio = filemap_lock_folio(inode->i_mapping,
				start_byte >> PAGE_SHIFT);
		if (IS_ERR(folio)) {
			start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
					PAGE_SIZE;
			continue;
		}

		/* if dirty, punch up to offset */
		if (folio_test_dirty(folio)) {
			if (start_byte > *punch_start_byte) {
				int	error;

				error = punch(inode, *punch_start_byte,
						start_byte - *punch_start_byte);
				if (error) {
					folio_unlock(folio);
					folio_put(folio);
					return error;
				}
			}

			/*
			 * Make sure the next punch start is correctly bound to
			 * the end of this data range, not the end of the folio.
			 */
			*punch_start_byte = min_t(loff_t, end_byte,
					folio_next_index(folio) << PAGE_SHIFT);
		}

		/* move offset to start of next folio in range */
		start_byte = folio_next_index(folio) << PAGE_SHIFT;
		folio_unlock(folio);
		folio_put(folio);
	}
	return 0;
}

/*
 * Punch out all the delalloc blocks in the range given except for those that
 * have dirty data still pending in the page cache - those are going to be
 * written and so must still retain the delalloc backing for writeback.
 *
 * As we are scanning the page cache for data, we don't need to reimplement the
 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
 * start and end of data ranges correctly even for sub-folio block sizes. This
 * byte range based iteration is especially convenient because it means we
 * don't have to care about variable size folios, nor where the start or end of
 * the data range lies within a folio, if they lie within the same folio or even
 * if there are multiple discontiguous data ranges within the folio.
 *
 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
 * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
 * date. A write page fault can then mark it dirty. If we then fail a write()
 * beyond EOF into that up to date cached range, we allocate a delalloc block
 * beyond EOF and then have to punch it out. Because the range is up to date,
 * mapping_seek_hole_data() will return it, and we will skip the punch because
 * the folio is dirty. This is incorrect - we always need to punch out delalloc
 * beyond EOF in this case as writeback will never write back and convert that
 * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
 * resulting in always punching out the range from the EOF to the end of the
 * range the iomap spans.
 *
 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
 * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
 * returns the end of the data range (data_end). Using closed intervals would
 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
 * the code to subtle off-by-one bugs....
 */
static int iomap_write_delalloc_release(struct inode *inode,
		loff_t start_byte, loff_t end_byte,
		int (*punch)(struct inode *inode, loff_t pos, loff_t length))
{
	loff_t punch_start_byte = start_byte;
	loff_t scan_end_byte = min(i_size_read(inode), end_byte);
	int error = 0;

	/*
	 * Lock the mapping to avoid races with page faults re-instantiating
	 * folios and dirtying them via ->page_mkwrite whilst we walk the
	 * cache and perform delalloc extent removal. Failing to do this can
	 * leave dirty pages with no space reservation in the cache.
	 */
	filemap_invalidate_lock(inode->i_mapping);
	while (start_byte < scan_end_byte) {
		loff_t		data_end;

		start_byte = mapping_seek_hole_data(inode->i_mapping,
				start_byte, scan_end_byte, SEEK_DATA);
		/*
		 * If there is no more data to scan, all that is left is to
		 * punch out the remaining range.
992f43dc4dcSDave Chinner */ 993f43dc4dcSDave Chinner if (start_byte == -ENXIO || start_byte == scan_end_byte) 994f43dc4dcSDave Chinner break; 995f43dc4dcSDave Chinner if (start_byte < 0) { 996f43dc4dcSDave Chinner error = start_byte; 997f43dc4dcSDave Chinner goto out_unlock; 998f43dc4dcSDave Chinner } 999f43dc4dcSDave Chinner WARN_ON_ONCE(start_byte < punch_start_byte); 1000f43dc4dcSDave Chinner WARN_ON_ONCE(start_byte > scan_end_byte); 1001f43dc4dcSDave Chinner 1002f43dc4dcSDave Chinner /* 1003f43dc4dcSDave Chinner * We find the end of this contiguous cached data range by 1004f43dc4dcSDave Chinner * seeking from start_byte to the beginning of the next hole. 1005f43dc4dcSDave Chinner */ 1006f43dc4dcSDave Chinner data_end = mapping_seek_hole_data(inode->i_mapping, start_byte, 1007f43dc4dcSDave Chinner scan_end_byte, SEEK_HOLE); 1008f43dc4dcSDave Chinner if (data_end < 0) { 1009f43dc4dcSDave Chinner error = data_end; 1010f43dc4dcSDave Chinner goto out_unlock; 1011f43dc4dcSDave Chinner } 1012f43dc4dcSDave Chinner WARN_ON_ONCE(data_end <= start_byte); 1013f43dc4dcSDave Chinner WARN_ON_ONCE(data_end > scan_end_byte); 1014f43dc4dcSDave Chinner 1015f43dc4dcSDave Chinner error = iomap_write_delalloc_scan(inode, &punch_start_byte, 1016f43dc4dcSDave Chinner start_byte, data_end, punch); 1017f43dc4dcSDave Chinner if (error) 1018f43dc4dcSDave Chinner goto out_unlock; 1019f43dc4dcSDave Chinner 1020f43dc4dcSDave Chinner /* The next data search starts at the end of this one. */ 1021f43dc4dcSDave Chinner start_byte = data_end; 1022f43dc4dcSDave Chinner } 1023f43dc4dcSDave Chinner 1024f43dc4dcSDave Chinner if (punch_start_byte < end_byte) 1025f43dc4dcSDave Chinner error = punch(inode, punch_start_byte, 1026f43dc4dcSDave Chinner end_byte - punch_start_byte); 1027f43dc4dcSDave Chinner out_unlock: 1028f43dc4dcSDave Chinner filemap_invalidate_unlock(inode->i_mapping); 1029f43dc4dcSDave Chinner return error; 1030f43dc4dcSDave Chinner } 1031f43dc4dcSDave Chinner 1032f43dc4dcSDave Chinner /* 10339c7babf9SDave Chinner * When a short write occurs, the filesystem may need to remove reserved space 10349c7babf9SDave Chinner * that was allocated in ->iomap_begin from its ->iomap_end method. For 10359c7babf9SDave Chinner * filesystems that use delayed allocation, we need to punch out delalloc 10369c7babf9SDave Chinner * extents from the range that are not dirty in the page cache. As the write can 10379c7babf9SDave Chinner * race with page faults, there can be dirty pages over the delalloc extent 10389c7babf9SDave Chinner * outside the range of a short write but still within the delalloc extent 10399c7babf9SDave Chinner * allocated for this iomap. 10409c7babf9SDave Chinner * 10419c7babf9SDave Chinner * This function uses [start_byte, end_byte) intervals (i.e. open ended) to 1042f43dc4dcSDave Chinner * simplify range iterations. 1043f43dc4dcSDave Chinner * 1044f43dc4dcSDave Chinner * The punch() callback *must* only punch delalloc extents in the range passed 1045f43dc4dcSDave Chinner * to it. It must skip over all other types of extents in the range and leave 1046f43dc4dcSDave Chinner * them completely unchanged. It must do this punch atomically with respect to 1047f43dc4dcSDave Chinner * other extent modifications. 1048f43dc4dcSDave Chinner * 1049f43dc4dcSDave Chinner * The punch() callback may be called with a folio locked to prevent writeback 1050f43dc4dcSDave Chinner * extent allocation racing at the edge of the range we are currently punching.
1051f43dc4dcSDave Chinner * The locked folio may or may not cover the range being punched, so it is not 1052f43dc4dcSDave Chinner * safe for the punch() callback to lock folios itself. 1053f43dc4dcSDave Chinner * 1054f43dc4dcSDave Chinner * Lock order is: 1055f43dc4dcSDave Chinner * 1056f43dc4dcSDave Chinner * inode->i_rwsem (shared or exclusive) 1057f43dc4dcSDave Chinner * inode->i_mapping->invalidate_lock (exclusive) 1058f43dc4dcSDave Chinner * folio_lock() 1059f43dc4dcSDave Chinner * ->punch 1060f43dc4dcSDave Chinner * internal filesystem allocation lock 10619c7babf9SDave Chinner */ 10629c7babf9SDave Chinner int iomap_file_buffered_write_punch_delalloc(struct inode *inode, 10639c7babf9SDave Chinner struct iomap *iomap, loff_t pos, loff_t length, 10649c7babf9SDave Chinner ssize_t written, 10659c7babf9SDave Chinner int (*punch)(struct inode *inode, loff_t pos, loff_t length)) 10669c7babf9SDave Chinner { 10679c7babf9SDave Chinner loff_t start_byte; 10689c7babf9SDave Chinner loff_t end_byte; 1069302efbefSLu Hongfei unsigned int blocksize = i_blocksize(inode); 10709c7babf9SDave Chinner 10719c7babf9SDave Chinner if (iomap->type != IOMAP_DELALLOC) 10729c7babf9SDave Chinner return 0; 10739c7babf9SDave Chinner 10749c7babf9SDave Chinner /* If we didn't reserve the blocks, we're not allowed to punch them. */ 10759c7babf9SDave Chinner if (!(iomap->flags & IOMAP_F_NEW)) 10769c7babf9SDave Chinner return 0; 10779c7babf9SDave Chinner 10789c7babf9SDave Chinner /* 10799c7babf9SDave Chinner * start_byte refers to the first unused block after a short write. If 10809c7babf9SDave Chinner * nothing was written, round offset down to point at the first block in 10819c7babf9SDave Chinner * the range. 10829c7babf9SDave Chinner */ 10839c7babf9SDave Chinner if (unlikely(!written)) 10849c7babf9SDave Chinner start_byte = round_down(pos, blocksize); 10859c7babf9SDave Chinner else 10869c7babf9SDave Chinner start_byte = round_up(pos + written, blocksize); 10879c7babf9SDave Chinner end_byte = round_up(pos + length, blocksize); 10889c7babf9SDave Chinner 10899c7babf9SDave Chinner /* Nothing to do if we've written the entire delalloc extent */ 10909c7babf9SDave Chinner if (start_byte >= end_byte) 10919c7babf9SDave Chinner return 0; 10929c7babf9SDave Chinner 1093f43dc4dcSDave Chinner return iomap_write_delalloc_release(inode, start_byte, end_byte, 1094f43dc4dcSDave Chinner punch); 10959c7babf9SDave Chinner } 10969c7babf9SDave Chinner EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc); 10979c7babf9SDave Chinner 10988fc274d1SChristoph Hellwig static loff_t iomap_unshare_iter(struct iomap_iter *iter) 1099afc51aaaSDarrick J. Wong { 11008fc274d1SChristoph Hellwig struct iomap *iomap = &iter->iomap; 1101fad0a1abSChristoph Hellwig const struct iomap *srcmap = iomap_iter_srcmap(iter); 11028fc274d1SChristoph Hellwig loff_t pos = iter->pos; 11038fc274d1SChristoph Hellwig loff_t length = iomap_length(iter); 1104afc51aaaSDarrick J. Wong long status = 0; 1105d4ff3b2eSMatthew Wilcox (Oracle) loff_t written = 0; 1106afc51aaaSDarrick J. Wong 11073590c4d8SChristoph Hellwig /* don't bother with blocks that are not shared to start with */ 11083590c4d8SChristoph Hellwig if (!(iomap->flags & IOMAP_F_SHARED)) 11093590c4d8SChristoph Hellwig return length; 11103590c4d8SChristoph Hellwig /* don't bother with holes or unwritten extents */ 1111c039b997SGoldwyn Rodrigues if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) 11123590c4d8SChristoph Hellwig return length; 11133590c4d8SChristoph Hellwig 1114afc51aaaSDarrick J. 
Wong do { 111532a38a49SChristoph Hellwig unsigned long offset = offset_in_page(pos); 111632a38a49SChristoph Hellwig unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length); 1117bc6123a8SMatthew Wilcox (Oracle) struct folio *folio; 1118afc51aaaSDarrick J. Wong 1119bc6123a8SMatthew Wilcox (Oracle) status = iomap_write_begin(iter, pos, bytes, &folio); 1120afc51aaaSDarrick J. Wong if (unlikely(status)) 1121afc51aaaSDarrick J. Wong return status; 1122d7b64041SDave Chinner if (iter->iomap.flags & IOMAP_F_STALE) 1123d7b64041SDave Chinner break; 1124afc51aaaSDarrick J. Wong 1125bc6123a8SMatthew Wilcox (Oracle) status = iomap_write_end(iter, pos, bytes, bytes, folio); 1126afc51aaaSDarrick J. Wong if (WARN_ON_ONCE(status == 0)) 1127afc51aaaSDarrick J. Wong return -EIO; 1128afc51aaaSDarrick J. Wong 1129afc51aaaSDarrick J. Wong cond_resched(); 1130afc51aaaSDarrick J. Wong 1131afc51aaaSDarrick J. Wong pos += status; 1132afc51aaaSDarrick J. Wong written += status; 1133afc51aaaSDarrick J. Wong length -= status; 1134afc51aaaSDarrick J. Wong 11358fc274d1SChristoph Hellwig balance_dirty_pages_ratelimited(iter->inode->i_mapping); 1136afc51aaaSDarrick J. Wong } while (length); 1137afc51aaaSDarrick J. Wong 1138afc51aaaSDarrick J. Wong return written; 1139afc51aaaSDarrick J. Wong } 1140afc51aaaSDarrick J. Wong 1141afc51aaaSDarrick J. Wong int 11423590c4d8SChristoph Hellwig iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, 1143afc51aaaSDarrick J. Wong const struct iomap_ops *ops) 1144afc51aaaSDarrick J. Wong { 11458fc274d1SChristoph Hellwig struct iomap_iter iter = { 11468fc274d1SChristoph Hellwig .inode = inode, 11478fc274d1SChristoph Hellwig .pos = pos, 11488fc274d1SChristoph Hellwig .len = len, 1149b74b1293SChristoph Hellwig .flags = IOMAP_WRITE | IOMAP_UNSHARE, 11508fc274d1SChristoph Hellwig }; 11518fc274d1SChristoph Hellwig int ret; 1152afc51aaaSDarrick J. Wong 11538fc274d1SChristoph Hellwig while ((ret = iomap_iter(&iter, ops)) > 0) 11548fc274d1SChristoph Hellwig iter.processed = iomap_unshare_iter(&iter); 1155afc51aaaSDarrick J. Wong return ret; 1156afc51aaaSDarrick J. Wong } 11573590c4d8SChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_file_unshare); 1158afc51aaaSDarrick J. Wong 11592aa3048eSChristoph Hellwig static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) 1160afc51aaaSDarrick J. Wong { 1161fad0a1abSChristoph Hellwig const struct iomap *srcmap = iomap_iter_srcmap(iter); 11622aa3048eSChristoph Hellwig loff_t pos = iter->pos; 11632aa3048eSChristoph Hellwig loff_t length = iomap_length(iter); 1164afc51aaaSDarrick J. Wong loff_t written = 0; 1165afc51aaaSDarrick J. Wong 1166afc51aaaSDarrick J. Wong /* already zeroed? we're done. */ 1167c039b997SGoldwyn Rodrigues if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) 116881ee8e52SMatthew Wilcox (Oracle) return length; 1169afc51aaaSDarrick J. Wong 1170afc51aaaSDarrick J. Wong do { 11714d7bd0ebSMatthew Wilcox (Oracle) struct folio *folio; 11724d7bd0ebSMatthew Wilcox (Oracle) int status; 11734d7bd0ebSMatthew Wilcox (Oracle) size_t offset; 11744d7bd0ebSMatthew Wilcox (Oracle) size_t bytes = min_t(u64, SIZE_MAX, length); 1175afc51aaaSDarrick J. 
Wong 11764d7bd0ebSMatthew Wilcox (Oracle) status = iomap_write_begin(iter, pos, bytes, &folio); 11774d7bd0ebSMatthew Wilcox (Oracle) if (status) 11784d7bd0ebSMatthew Wilcox (Oracle) return status; 1179d7b64041SDave Chinner if (iter->iomap.flags & IOMAP_F_STALE) 1180d7b64041SDave Chinner break; 11814d7bd0ebSMatthew Wilcox (Oracle) 11824d7bd0ebSMatthew Wilcox (Oracle) offset = offset_in_folio(folio, pos); 11834d7bd0ebSMatthew Wilcox (Oracle) if (bytes > folio_size(folio) - offset) 11844d7bd0ebSMatthew Wilcox (Oracle) bytes = folio_size(folio) - offset; 11854d7bd0ebSMatthew Wilcox (Oracle) 11864d7bd0ebSMatthew Wilcox (Oracle) folio_zero_range(folio, offset, bytes); 11874d7bd0ebSMatthew Wilcox (Oracle) folio_mark_accessed(folio); 11884d7bd0ebSMatthew Wilcox (Oracle) 11894d7bd0ebSMatthew Wilcox (Oracle) bytes = iomap_write_end(iter, pos, bytes, bytes, folio); 11904d7bd0ebSMatthew Wilcox (Oracle) if (WARN_ON_ONCE(bytes == 0)) 11914d7bd0ebSMatthew Wilcox (Oracle) return -EIO; 1192afc51aaaSDarrick J. Wong 1193afc51aaaSDarrick J. Wong pos += bytes; 119481ee8e52SMatthew Wilcox (Oracle) length -= bytes; 1195afc51aaaSDarrick J. Wong written += bytes; 119681ee8e52SMatthew Wilcox (Oracle) } while (length > 0); 1197afc51aaaSDarrick J. Wong 119898eb8d95SKaixu Xia if (did_zero) 119998eb8d95SKaixu Xia *did_zero = true; 1200afc51aaaSDarrick J. Wong return written; 1201afc51aaaSDarrick J. Wong } 1202afc51aaaSDarrick J. Wong 1203afc51aaaSDarrick J. Wong int 1204afc51aaaSDarrick J. Wong iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, 1205afc51aaaSDarrick J. Wong const struct iomap_ops *ops) 1206afc51aaaSDarrick J. Wong { 12072aa3048eSChristoph Hellwig struct iomap_iter iter = { 12082aa3048eSChristoph Hellwig .inode = inode, 12092aa3048eSChristoph Hellwig .pos = pos, 12102aa3048eSChristoph Hellwig .len = len, 12112aa3048eSChristoph Hellwig .flags = IOMAP_ZERO, 12122aa3048eSChristoph Hellwig }; 12132aa3048eSChristoph Hellwig int ret; 1214afc51aaaSDarrick J. Wong 12152aa3048eSChristoph Hellwig while ((ret = iomap_iter(&iter, ops)) > 0) 12162aa3048eSChristoph Hellwig iter.processed = iomap_zero_iter(&iter, did_zero); 1217afc51aaaSDarrick J. Wong return ret; 1218afc51aaaSDarrick J. Wong } 1219afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_zero_range); 1220afc51aaaSDarrick J. Wong 1221afc51aaaSDarrick J. Wong int 1222afc51aaaSDarrick J. Wong iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, 1223afc51aaaSDarrick J. Wong const struct iomap_ops *ops) 1224afc51aaaSDarrick J. Wong { 1225afc51aaaSDarrick J. Wong unsigned int blocksize = i_blocksize(inode); 1226afc51aaaSDarrick J. Wong unsigned int off = pos & (blocksize - 1); 1227afc51aaaSDarrick J. Wong 1228afc51aaaSDarrick J. Wong /* Block boundary? Nothing to do */ 1229afc51aaaSDarrick J. Wong if (!off) 1230afc51aaaSDarrick J. Wong return 0; 1231afc51aaaSDarrick J. Wong return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops); 1232afc51aaaSDarrick J. Wong } 1233afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_truncate_page); 1234afc51aaaSDarrick J. Wong 1235ea0f843aSMatthew Wilcox (Oracle) static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter, 1236ea0f843aSMatthew Wilcox (Oracle) struct folio *folio) 1237afc51aaaSDarrick J. Wong { 1238253564baSChristoph Hellwig loff_t length = iomap_length(iter); 1239afc51aaaSDarrick J. Wong int ret; 1240afc51aaaSDarrick J. 
Wong 1241253564baSChristoph Hellwig if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) { 1242d1bd0b4eSMatthew Wilcox (Oracle) ret = __block_write_begin_int(folio, iter->pos, length, NULL, 1243253564baSChristoph Hellwig &iter->iomap); 1244afc51aaaSDarrick J. Wong if (ret) 1245afc51aaaSDarrick J. Wong return ret; 1246ea0f843aSMatthew Wilcox (Oracle) block_commit_write(&folio->page, 0, length); 1247afc51aaaSDarrick J. Wong } else { 1248ea0f843aSMatthew Wilcox (Oracle) WARN_ON_ONCE(!folio_test_uptodate(folio)); 1249ea0f843aSMatthew Wilcox (Oracle) folio_mark_dirty(folio); 1250afc51aaaSDarrick J. Wong } 1251afc51aaaSDarrick J. Wong 1252afc51aaaSDarrick J. Wong return length; 1253afc51aaaSDarrick J. Wong } 1254afc51aaaSDarrick J. Wong 1255afc51aaaSDarrick J. Wong vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) 1256afc51aaaSDarrick J. Wong { 1257253564baSChristoph Hellwig struct iomap_iter iter = { 1258253564baSChristoph Hellwig .inode = file_inode(vmf->vma->vm_file), 1259253564baSChristoph Hellwig .flags = IOMAP_WRITE | IOMAP_FAULT, 1260253564baSChristoph Hellwig }; 1261ea0f843aSMatthew Wilcox (Oracle) struct folio *folio = page_folio(vmf->page); 1262afc51aaaSDarrick J. Wong ssize_t ret; 1263afc51aaaSDarrick J. Wong 1264ea0f843aSMatthew Wilcox (Oracle) folio_lock(folio); 1265ea0f843aSMatthew Wilcox (Oracle) ret = folio_mkwrite_check_truncate(folio, iter.inode); 1266243145bcSAndreas Gruenbacher if (ret < 0) 1267afc51aaaSDarrick J. Wong goto out_unlock; 1268ea0f843aSMatthew Wilcox (Oracle) iter.pos = folio_pos(folio); 1269253564baSChristoph Hellwig iter.len = ret; 1270253564baSChristoph Hellwig while ((ret = iomap_iter(&iter, ops)) > 0) 1271ea0f843aSMatthew Wilcox (Oracle) iter.processed = iomap_folio_mkwrite_iter(&iter, folio); 1272afc51aaaSDarrick J. Wong 1273253564baSChristoph Hellwig if (ret < 0) 1274afc51aaaSDarrick J. Wong goto out_unlock; 1275ea0f843aSMatthew Wilcox (Oracle) folio_wait_stable(folio); 1276afc51aaaSDarrick J. Wong return VM_FAULT_LOCKED; 1277afc51aaaSDarrick J. Wong out_unlock: 1278ea0f843aSMatthew Wilcox (Oracle) folio_unlock(folio); 1279afc51aaaSDarrick J. Wong return block_page_mkwrite_return(ret); 1280afc51aaaSDarrick J. Wong } 1281afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_page_mkwrite); 1282598ecfbaSChristoph Hellwig 12838ffd74e9SMatthew Wilcox (Oracle) static void iomap_finish_folio_write(struct inode *inode, struct folio *folio, 12848ffd74e9SMatthew Wilcox (Oracle) size_t len, int error) 1285598ecfbaSChristoph Hellwig { 128604f52c4eSRitesh Harjani (IBM) struct iomap_folio_state *ifs = folio->private; 1287598ecfbaSChristoph Hellwig 1288598ecfbaSChristoph Hellwig if (error) { 12898ffd74e9SMatthew Wilcox (Oracle) folio_set_error(folio); 1290b69eea82SDarrick J. Wong mapping_set_error(inode->i_mapping, error); 1291598ecfbaSChristoph Hellwig } 1292598ecfbaSChristoph Hellwig 129304f52c4eSRitesh Harjani (IBM) WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs); 129404f52c4eSRitesh Harjani (IBM) WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0); 1295598ecfbaSChristoph Hellwig 129604f52c4eSRitesh Harjani (IBM) if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending)) 12978ffd74e9SMatthew Wilcox (Oracle) folio_end_writeback(folio); 1298598ecfbaSChristoph Hellwig } 1299598ecfbaSChristoph Hellwig 1300598ecfbaSChristoph Hellwig /* 1301598ecfbaSChristoph Hellwig * We're now finished for good with this ioend structure. 
Update the page 1302598ecfbaSChristoph Hellwig * state, release holds on bios, and finally free up memory. Do not use the 1303598ecfbaSChristoph Hellwig * ioend after this. 1304598ecfbaSChristoph Hellwig */ 1305ebb7fb15SDave Chinner static u32 1306598ecfbaSChristoph Hellwig iomap_finish_ioend(struct iomap_ioend *ioend, int error) 1307598ecfbaSChristoph Hellwig { 1308598ecfbaSChristoph Hellwig struct inode *inode = ioend->io_inode; 1309598ecfbaSChristoph Hellwig struct bio *bio = &ioend->io_inline_bio; 1310598ecfbaSChristoph Hellwig struct bio *last = ioend->io_bio, *next; 1311598ecfbaSChristoph Hellwig u64 start = bio->bi_iter.bi_sector; 1312c275779fSZorro Lang loff_t offset = ioend->io_offset; 1313598ecfbaSChristoph Hellwig bool quiet = bio_flagged(bio, BIO_QUIET); 1314ebb7fb15SDave Chinner u32 folio_count = 0; 1315598ecfbaSChristoph Hellwig 1316598ecfbaSChristoph Hellwig for (bio = &ioend->io_inline_bio; bio; bio = next) { 13178ffd74e9SMatthew Wilcox (Oracle) struct folio_iter fi; 1318598ecfbaSChristoph Hellwig 1319598ecfbaSChristoph Hellwig /* 1320598ecfbaSChristoph Hellwig * For the last bio, bi_private points to the ioend, so we 1321598ecfbaSChristoph Hellwig * need to explicitly end the iteration here. 1322598ecfbaSChristoph Hellwig */ 1323598ecfbaSChristoph Hellwig if (bio == last) 1324598ecfbaSChristoph Hellwig next = NULL; 1325598ecfbaSChristoph Hellwig else 1326598ecfbaSChristoph Hellwig next = bio->bi_private; 1327598ecfbaSChristoph Hellwig 13288ffd74e9SMatthew Wilcox (Oracle) /* walk all folios in bio, ending page IO on them */ 1329ebb7fb15SDave Chinner bio_for_each_folio_all(fi, bio) { 13308ffd74e9SMatthew Wilcox (Oracle) iomap_finish_folio_write(inode, fi.folio, fi.length, 13318ffd74e9SMatthew Wilcox (Oracle) error); 1332ebb7fb15SDave Chinner folio_count++; 1333ebb7fb15SDave Chinner } 1334598ecfbaSChristoph Hellwig bio_put(bio); 1335598ecfbaSChristoph Hellwig } 1336c275779fSZorro Lang /* The ioend has been freed by bio_put() */ 1337598ecfbaSChristoph Hellwig 1338598ecfbaSChristoph Hellwig if (unlikely(error && !quiet)) { 1339598ecfbaSChristoph Hellwig printk_ratelimited(KERN_ERR 13409cd0ed63SDarrick J. Wong "%s: writeback error on inode %lu, offset %lld, sector %llu", 1341c275779fSZorro Lang inode->i_sb->s_id, inode->i_ino, offset, start); 1342598ecfbaSChristoph Hellwig } 1343ebb7fb15SDave Chinner return folio_count; 1344598ecfbaSChristoph Hellwig } 1345598ecfbaSChristoph Hellwig 1346ebb7fb15SDave Chinner /* 1347ebb7fb15SDave Chinner * Ioend completion routine for merged bios. This can only be called from task 1348ebb7fb15SDave Chinner * contexts as merged ioends can be of unbound length. Hence we have to break up 1349ebb7fb15SDave Chinner * the writeback completions into manageable chunks to avoid long scheduler 1350ebb7fb15SDave Chinner * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get 1351ebb7fb15SDave Chinner * good batch processing throughput without creating adverse scheduler latency 1352ebb7fb15SDave Chinner * conditions. 
1353ebb7fb15SDave Chinner */ 1354598ecfbaSChristoph Hellwig void 1355598ecfbaSChristoph Hellwig iomap_finish_ioends(struct iomap_ioend *ioend, int error) 1356598ecfbaSChristoph Hellwig { 1357598ecfbaSChristoph Hellwig struct list_head tmp; 1358ebb7fb15SDave Chinner u32 completions; 1359ebb7fb15SDave Chinner 1360ebb7fb15SDave Chinner might_sleep(); 1361598ecfbaSChristoph Hellwig 1362598ecfbaSChristoph Hellwig list_replace_init(&ioend->io_list, &tmp); 1363ebb7fb15SDave Chinner completions = iomap_finish_ioend(ioend, error); 1364598ecfbaSChristoph Hellwig 1365598ecfbaSChristoph Hellwig while (!list_empty(&tmp)) { 1366ebb7fb15SDave Chinner if (completions > IOEND_BATCH_SIZE * 8) { 1367ebb7fb15SDave Chinner cond_resched(); 1368ebb7fb15SDave Chinner completions = 0; 1369ebb7fb15SDave Chinner } 1370598ecfbaSChristoph Hellwig ioend = list_first_entry(&tmp, struct iomap_ioend, io_list); 1371598ecfbaSChristoph Hellwig list_del_init(&ioend->io_list); 1372ebb7fb15SDave Chinner completions += iomap_finish_ioend(ioend, error); 1373598ecfbaSChristoph Hellwig } 1374598ecfbaSChristoph Hellwig } 1375598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_finish_ioends); 1376598ecfbaSChristoph Hellwig 1377598ecfbaSChristoph Hellwig /* 1378598ecfbaSChristoph Hellwig * We can merge two adjacent ioends if they have the same set of work to do. 1379598ecfbaSChristoph Hellwig */ 1380598ecfbaSChristoph Hellwig static bool 1381598ecfbaSChristoph Hellwig iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next) 1382598ecfbaSChristoph Hellwig { 1383598ecfbaSChristoph Hellwig if (ioend->io_bio->bi_status != next->io_bio->bi_status) 1384598ecfbaSChristoph Hellwig return false; 1385598ecfbaSChristoph Hellwig if ((ioend->io_flags & IOMAP_F_SHARED) ^ 1386598ecfbaSChristoph Hellwig (next->io_flags & IOMAP_F_SHARED)) 1387598ecfbaSChristoph Hellwig return false; 1388598ecfbaSChristoph Hellwig if ((ioend->io_type == IOMAP_UNWRITTEN) ^ 1389598ecfbaSChristoph Hellwig (next->io_type == IOMAP_UNWRITTEN)) 1390598ecfbaSChristoph Hellwig return false; 1391598ecfbaSChristoph Hellwig if (ioend->io_offset + ioend->io_size != next->io_offset) 1392598ecfbaSChristoph Hellwig return false; 1393ebb7fb15SDave Chinner /* 1394ebb7fb15SDave Chinner * Do not merge physically discontiguous ioends. The filesystem 1395ebb7fb15SDave Chinner * completion functions will have to iterate the physical 1396ebb7fb15SDave Chinner * discontiguities even if we merge the ioends at a logical level, so 1397ebb7fb15SDave Chinner * we don't gain anything by merging physical discontiguities here. 1398ebb7fb15SDave Chinner * 1399ebb7fb15SDave Chinner * We cannot use bio->bi_iter.bi_sector here as it is modified during 1400ebb7fb15SDave Chinner * submission so does not point to the start sector of the bio at 1401ebb7fb15SDave Chinner * completion. 
1402ebb7fb15SDave Chinner */ 1403ebb7fb15SDave Chinner if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector) 1404ebb7fb15SDave Chinner return false; 1405598ecfbaSChristoph Hellwig return true; 1406598ecfbaSChristoph Hellwig } 1407598ecfbaSChristoph Hellwig 1408598ecfbaSChristoph Hellwig void 14096e552494SBrian Foster iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends) 1410598ecfbaSChristoph Hellwig { 1411598ecfbaSChristoph Hellwig struct iomap_ioend *next; 1412598ecfbaSChristoph Hellwig 1413598ecfbaSChristoph Hellwig INIT_LIST_HEAD(&ioend->io_list); 1414598ecfbaSChristoph Hellwig 1415598ecfbaSChristoph Hellwig while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend, 1416598ecfbaSChristoph Hellwig io_list))) { 1417598ecfbaSChristoph Hellwig if (!iomap_ioend_can_merge(ioend, next)) 1418598ecfbaSChristoph Hellwig break; 1419598ecfbaSChristoph Hellwig list_move_tail(&next->io_list, &ioend->io_list); 1420598ecfbaSChristoph Hellwig ioend->io_size += next->io_size; 1421598ecfbaSChristoph Hellwig } 1422598ecfbaSChristoph Hellwig } 1423598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_ioend_try_merge); 1424598ecfbaSChristoph Hellwig 1425598ecfbaSChristoph Hellwig static int 14264f0f586bSSami Tolvanen iomap_ioend_compare(void *priv, const struct list_head *a, 14274f0f586bSSami Tolvanen const struct list_head *b) 1428598ecfbaSChristoph Hellwig { 1429b3d423ecSChristoph Hellwig struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list); 1430b3d423ecSChristoph Hellwig struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list); 1431598ecfbaSChristoph Hellwig 1432598ecfbaSChristoph Hellwig if (ia->io_offset < ib->io_offset) 1433598ecfbaSChristoph Hellwig return -1; 1434b3d423ecSChristoph Hellwig if (ia->io_offset > ib->io_offset) 1435598ecfbaSChristoph Hellwig return 1; 1436598ecfbaSChristoph Hellwig return 0; 1437598ecfbaSChristoph Hellwig } 1438598ecfbaSChristoph Hellwig 1439598ecfbaSChristoph Hellwig void 1440598ecfbaSChristoph Hellwig iomap_sort_ioends(struct list_head *ioend_list) 1441598ecfbaSChristoph Hellwig { 1442598ecfbaSChristoph Hellwig list_sort(NULL, ioend_list, iomap_ioend_compare); 1443598ecfbaSChristoph Hellwig } 1444598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_sort_ioends); 1445598ecfbaSChristoph Hellwig 1446598ecfbaSChristoph Hellwig static void iomap_writepage_end_bio(struct bio *bio) 1447598ecfbaSChristoph Hellwig { 1448598ecfbaSChristoph Hellwig struct iomap_ioend *ioend = bio->bi_private; 1449598ecfbaSChristoph Hellwig 1450598ecfbaSChristoph Hellwig iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status)); 1451598ecfbaSChristoph Hellwig } 1452598ecfbaSChristoph Hellwig 1453598ecfbaSChristoph Hellwig /* 1454598ecfbaSChristoph Hellwig * Submit the final bio for an ioend. 1455598ecfbaSChristoph Hellwig * 1456598ecfbaSChristoph Hellwig * If @error is non-zero, it means that we have a situation where some part of 1457f1f264b4SAndreas Gruenbacher * the submission process has failed after we've marked pages for writeback 1458598ecfbaSChristoph Hellwig * and unlocked them. In this situation, we need to fail the bio instead of 1459598ecfbaSChristoph Hellwig * submitting it. This typically only happens on a filesystem shutdown. 
1460598ecfbaSChristoph Hellwig */ 1461598ecfbaSChristoph Hellwig static int 1462598ecfbaSChristoph Hellwig iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend, 1463598ecfbaSChristoph Hellwig int error) 1464598ecfbaSChristoph Hellwig { 1465598ecfbaSChristoph Hellwig ioend->io_bio->bi_private = ioend; 1466598ecfbaSChristoph Hellwig ioend->io_bio->bi_end_io = iomap_writepage_end_bio; 1467598ecfbaSChristoph Hellwig 1468598ecfbaSChristoph Hellwig if (wpc->ops->prepare_ioend) 1469598ecfbaSChristoph Hellwig error = wpc->ops->prepare_ioend(ioend, error); 1470598ecfbaSChristoph Hellwig if (error) { 1471598ecfbaSChristoph Hellwig /* 1472f1f264b4SAndreas Gruenbacher * If we're failing the IO now, just mark the ioend with an 1473598ecfbaSChristoph Hellwig * error and finish it. This will run IO completion immediately 1474598ecfbaSChristoph Hellwig * as there is only one reference to the ioend at this point in 1475598ecfbaSChristoph Hellwig * time. 1476598ecfbaSChristoph Hellwig */ 1477598ecfbaSChristoph Hellwig ioend->io_bio->bi_status = errno_to_blk_status(error); 1478598ecfbaSChristoph Hellwig bio_endio(ioend->io_bio); 1479598ecfbaSChristoph Hellwig return error; 1480598ecfbaSChristoph Hellwig } 1481598ecfbaSChristoph Hellwig 1482598ecfbaSChristoph Hellwig submit_bio(ioend->io_bio); 1483598ecfbaSChristoph Hellwig return 0; 1484598ecfbaSChristoph Hellwig } 1485598ecfbaSChristoph Hellwig 1486598ecfbaSChristoph Hellwig static struct iomap_ioend * 1487598ecfbaSChristoph Hellwig iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, 1488598ecfbaSChristoph Hellwig loff_t offset, sector_t sector, struct writeback_control *wbc) 1489598ecfbaSChristoph Hellwig { 1490598ecfbaSChristoph Hellwig struct iomap_ioend *ioend; 1491598ecfbaSChristoph Hellwig struct bio *bio; 1492598ecfbaSChristoph Hellwig 1493609be106SChristoph Hellwig bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS, 1494609be106SChristoph Hellwig REQ_OP_WRITE | wbc_to_write_flags(wbc), 1495609be106SChristoph Hellwig GFP_NOFS, &iomap_ioend_bioset); 1496598ecfbaSChristoph Hellwig bio->bi_iter.bi_sector = sector; 1497598ecfbaSChristoph Hellwig wbc_init_bio(wbc, bio); 1498598ecfbaSChristoph Hellwig 1499598ecfbaSChristoph Hellwig ioend = container_of(bio, struct iomap_ioend, io_inline_bio); 1500598ecfbaSChristoph Hellwig INIT_LIST_HEAD(&ioend->io_list); 1501598ecfbaSChristoph Hellwig ioend->io_type = wpc->iomap.type; 1502598ecfbaSChristoph Hellwig ioend->io_flags = wpc->iomap.flags; 1503598ecfbaSChristoph Hellwig ioend->io_inode = inode; 1504598ecfbaSChristoph Hellwig ioend->io_size = 0; 1505ebb7fb15SDave Chinner ioend->io_folios = 0; 1506598ecfbaSChristoph Hellwig ioend->io_offset = offset; 1507598ecfbaSChristoph Hellwig ioend->io_bio = bio; 1508ebb7fb15SDave Chinner ioend->io_sector = sector; 1509598ecfbaSChristoph Hellwig return ioend; 1510598ecfbaSChristoph Hellwig } 1511598ecfbaSChristoph Hellwig 1512598ecfbaSChristoph Hellwig /* 1513598ecfbaSChristoph Hellwig * Allocate a new bio, and chain the old bio to the new one. 1514598ecfbaSChristoph Hellwig * 1515f1f264b4SAndreas Gruenbacher * Note that we have to perform the chaining in this unintuitive order 1516598ecfbaSChristoph Hellwig * so that the bi_private linkage is set up in the right direction for the 1517598ecfbaSChristoph Hellwig * traversal in iomap_finish_ioend(). 
1518598ecfbaSChristoph Hellwig */ 1519598ecfbaSChristoph Hellwig static struct bio * 1520598ecfbaSChristoph Hellwig iomap_chain_bio(struct bio *prev) 1521598ecfbaSChristoph Hellwig { 1522598ecfbaSChristoph Hellwig struct bio *new; 1523598ecfbaSChristoph Hellwig 152407888c66SChristoph Hellwig new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS); 152507888c66SChristoph Hellwig bio_clone_blkg_association(new, prev); 1526598ecfbaSChristoph Hellwig new->bi_iter.bi_sector = bio_end_sector(prev); 1527598ecfbaSChristoph Hellwig 1528598ecfbaSChristoph Hellwig bio_chain(prev, new); 1529598ecfbaSChristoph Hellwig bio_get(prev); /* for iomap_finish_ioend */ 1530598ecfbaSChristoph Hellwig submit_bio(prev); 1531598ecfbaSChristoph Hellwig return new; 1532598ecfbaSChristoph Hellwig } 1533598ecfbaSChristoph Hellwig 1534598ecfbaSChristoph Hellwig static bool 1535598ecfbaSChristoph Hellwig iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset, 1536598ecfbaSChristoph Hellwig sector_t sector) 1537598ecfbaSChristoph Hellwig { 1538598ecfbaSChristoph Hellwig if ((wpc->iomap.flags & IOMAP_F_SHARED) != 1539598ecfbaSChristoph Hellwig (wpc->ioend->io_flags & IOMAP_F_SHARED)) 1540598ecfbaSChristoph Hellwig return false; 1541598ecfbaSChristoph Hellwig if (wpc->iomap.type != wpc->ioend->io_type) 1542598ecfbaSChristoph Hellwig return false; 1543598ecfbaSChristoph Hellwig if (offset != wpc->ioend->io_offset + wpc->ioend->io_size) 1544598ecfbaSChristoph Hellwig return false; 1545598ecfbaSChristoph Hellwig if (sector != bio_end_sector(wpc->ioend->io_bio)) 1546598ecfbaSChristoph Hellwig return false; 1547ebb7fb15SDave Chinner /* 1548ebb7fb15SDave Chinner * Limit ioend bio chain lengths to minimise IO completion latency. This 1549ebb7fb15SDave Chinner * also prevents long tight loops ending page writeback on all the 1550ebb7fb15SDave Chinner * folios in the ioend. 1551ebb7fb15SDave Chinner */ 1552ebb7fb15SDave Chinner if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE) 1553ebb7fb15SDave Chinner return false; 1554598ecfbaSChristoph Hellwig return true; 1555598ecfbaSChristoph Hellwig } 1556598ecfbaSChristoph Hellwig 1557598ecfbaSChristoph Hellwig /* 1558598ecfbaSChristoph Hellwig * Test to see if we have an existing ioend structure that we could append to 1559f1f264b4SAndreas Gruenbacher * first; otherwise finish off the current ioend and start another. 
1560598ecfbaSChristoph Hellwig */ 1561598ecfbaSChristoph Hellwig static void 1562e735c007SMatthew Wilcox (Oracle) iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio, 156304f52c4eSRitesh Harjani (IBM) struct iomap_folio_state *ifs, struct iomap_writepage_ctx *wpc, 1564598ecfbaSChristoph Hellwig struct writeback_control *wbc, struct list_head *iolist) 1565598ecfbaSChristoph Hellwig { 1566e735c007SMatthew Wilcox (Oracle) sector_t sector = iomap_sector(&wpc->iomap, pos); 1567598ecfbaSChristoph Hellwig unsigned len = i_blocksize(inode); 1568e735c007SMatthew Wilcox (Oracle) size_t poff = offset_in_folio(folio, pos); 1569598ecfbaSChristoph Hellwig 1570e735c007SMatthew Wilcox (Oracle) if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) { 1571598ecfbaSChristoph Hellwig if (wpc->ioend) 1572598ecfbaSChristoph Hellwig list_add(&wpc->ioend->io_list, iolist); 1573e735c007SMatthew Wilcox (Oracle) wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc); 1574598ecfbaSChristoph Hellwig } 1575598ecfbaSChristoph Hellwig 1576e735c007SMatthew Wilcox (Oracle) if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) { 1577c1b79f11SChristoph Hellwig wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio); 1578c2478469SJohannes Thumshirn bio_add_folio_nofail(wpc->ioend->io_bio, folio, len, poff); 1579c1b79f11SChristoph Hellwig } 1580c1b79f11SChristoph Hellwig 158104f52c4eSRitesh Harjani (IBM) if (ifs) 158204f52c4eSRitesh Harjani (IBM) atomic_add(len, &ifs->write_bytes_pending); 1583598ecfbaSChristoph Hellwig wpc->ioend->io_size += len; 1584e735c007SMatthew Wilcox (Oracle) wbc_account_cgroup_owner(wbc, &folio->page, len); 1585598ecfbaSChristoph Hellwig } 1586598ecfbaSChristoph Hellwig 1587598ecfbaSChristoph Hellwig /* 1588598ecfbaSChristoph Hellwig * We implement an immediate ioend submission policy here to avoid needing to 1589598ecfbaSChristoph Hellwig * chain multiple ioends and hence nest mempool allocations which can violate 1590f1f264b4SAndreas Gruenbacher * the forward progress guarantees we need to provide. The current ioend we're 1591f1f264b4SAndreas Gruenbacher * adding blocks to is cached in the writepage context, and if the new block 1592f1f264b4SAndreas Gruenbacher * doesn't append to the cached ioend, it will create a new ioend and cache that 1593598ecfbaSChristoph Hellwig * instead. 1594598ecfbaSChristoph Hellwig * 1595598ecfbaSChristoph Hellwig * If a new ioend is created and cached, the old ioend is returned and queued 1596598ecfbaSChristoph Hellwig * locally for submission once the entire page is processed or an error has been 1597598ecfbaSChristoph Hellwig * detected. While ioends are submitted immediately after they are completed, 1598598ecfbaSChristoph Hellwig * batching optimisations are provided by higher level block plugging. 1599598ecfbaSChristoph Hellwig * 1600598ecfbaSChristoph Hellwig * At the end of a writeback pass, there will be a cached ioend remaining on the 1601598ecfbaSChristoph Hellwig * writepage context that the caller will need to submit. 
1602598ecfbaSChristoph Hellwig */ 1603598ecfbaSChristoph Hellwig static int 1604598ecfbaSChristoph Hellwig iomap_writepage_map(struct iomap_writepage_ctx *wpc, 1605598ecfbaSChristoph Hellwig struct writeback_control *wbc, struct inode *inode, 1606e735c007SMatthew Wilcox (Oracle) struct folio *folio, u64 end_pos) 1607598ecfbaSChristoph Hellwig { 160804f52c4eSRitesh Harjani (IBM) struct iomap_folio_state *ifs = ifs_alloc(inode, folio, 0); 1609598ecfbaSChristoph Hellwig struct iomap_ioend *ioend, *next; 1610598ecfbaSChristoph Hellwig unsigned len = i_blocksize(inode); 161192655036SMatthew Wilcox (Oracle) unsigned nblocks = i_blocks_per_folio(inode, folio); 161292655036SMatthew Wilcox (Oracle) u64 pos = folio_pos(folio); 1613598ecfbaSChristoph Hellwig int error = 0, count = 0, i; 1614598ecfbaSChristoph Hellwig LIST_HEAD(submit_list); 1615598ecfbaSChristoph Hellwig 161604f52c4eSRitesh Harjani (IBM) WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) != 0); 1617598ecfbaSChristoph Hellwig 1618598ecfbaSChristoph Hellwig /* 161992655036SMatthew Wilcox (Oracle) * Walk through the folio to find areas to write back. If we 162092655036SMatthew Wilcox (Oracle) * run off the end of the current map or find the current map 162192655036SMatthew Wilcox (Oracle) * invalid, grab a new one. 1622598ecfbaSChristoph Hellwig */ 162392655036SMatthew Wilcox (Oracle) for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) { 162404f52c4eSRitesh Harjani (IBM) if (ifs && !test_bit(i, ifs->state)) 1625598ecfbaSChristoph Hellwig continue; 1626598ecfbaSChristoph Hellwig 162792655036SMatthew Wilcox (Oracle) error = wpc->ops->map_blocks(wpc, inode, pos); 1628598ecfbaSChristoph Hellwig if (error) 1629598ecfbaSChristoph Hellwig break; 1630adc9c2e5SDarrick J. Wong trace_iomap_writepage_map(inode, &wpc->iomap); 16313e19e6f3SChristoph Hellwig if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE)) 16323e19e6f3SChristoph Hellwig continue; 1633598ecfbaSChristoph Hellwig if (wpc->iomap.type == IOMAP_HOLE) 1634598ecfbaSChristoph Hellwig continue; 163504f52c4eSRitesh Harjani (IBM) iomap_add_to_ioend(inode, pos, folio, ifs, wpc, wbc, 1636598ecfbaSChristoph Hellwig &submit_list); 1637598ecfbaSChristoph Hellwig count++; 1638598ecfbaSChristoph Hellwig } 1639ebb7fb15SDave Chinner if (count) 1640ebb7fb15SDave Chinner wpc->ioend->io_folios++; 1641598ecfbaSChristoph Hellwig 1642598ecfbaSChristoph Hellwig WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list)); 1643e735c007SMatthew Wilcox (Oracle) WARN_ON_ONCE(!folio_test_locked(folio)); 1644e735c007SMatthew Wilcox (Oracle) WARN_ON_ONCE(folio_test_writeback(folio)); 1645e735c007SMatthew Wilcox (Oracle) WARN_ON_ONCE(folio_test_dirty(folio)); 1646598ecfbaSChristoph Hellwig 1647598ecfbaSChristoph Hellwig /* 1648598ecfbaSChristoph Hellwig * We cannot cancel the ioend directly here on error. We may have 1649598ecfbaSChristoph Hellwig * already set other pages under writeback and hence we have to run I/O 1650598ecfbaSChristoph Hellwig * completion to mark the error state of the pages under writeback 1651598ecfbaSChristoph Hellwig * appropriately. 1652598ecfbaSChristoph Hellwig */ 1653598ecfbaSChristoph Hellwig if (unlikely(error)) { 1654598ecfbaSChristoph Hellwig /* 1655763e4cdcSBrian Foster * Let the filesystem know what portion of the current page 1656f1f264b4SAndreas Gruenbacher * failed to map. If the page hasn't been added to ioend, it 1657763e4cdcSBrian Foster * won't be affected by I/O completion and we must unlock it 1658763e4cdcSBrian Foster * now. 
1659598ecfbaSChristoph Hellwig */ 16606e478521SMatthew Wilcox (Oracle) if (wpc->ops->discard_folio) 166192655036SMatthew Wilcox (Oracle) wpc->ops->discard_folio(folio, pos); 1662763e4cdcSBrian Foster if (!count) { 1663e735c007SMatthew Wilcox (Oracle) folio_unlock(folio); 1664598ecfbaSChristoph Hellwig goto done; 1665598ecfbaSChristoph Hellwig } 1666598ecfbaSChristoph Hellwig } 1667598ecfbaSChristoph Hellwig 1668e735c007SMatthew Wilcox (Oracle) folio_start_writeback(folio); 1669e735c007SMatthew Wilcox (Oracle) folio_unlock(folio); 1670598ecfbaSChristoph Hellwig 1671598ecfbaSChristoph Hellwig /* 1672f1f264b4SAndreas Gruenbacher * Preserve the original error if there was one; catch 1673598ecfbaSChristoph Hellwig * submission errors here and propagate into subsequent ioend 1674598ecfbaSChristoph Hellwig * submissions. 1675598ecfbaSChristoph Hellwig */ 1676598ecfbaSChristoph Hellwig list_for_each_entry_safe(ioend, next, &submit_list, io_list) { 1677598ecfbaSChristoph Hellwig int error2; 1678598ecfbaSChristoph Hellwig 1679598ecfbaSChristoph Hellwig list_del_init(&ioend->io_list); 1680598ecfbaSChristoph Hellwig error2 = iomap_submit_ioend(wpc, ioend, error); 1681598ecfbaSChristoph Hellwig if (error2 && !error) 1682598ecfbaSChristoph Hellwig error = error2; 1683598ecfbaSChristoph Hellwig } 1684598ecfbaSChristoph Hellwig 1685598ecfbaSChristoph Hellwig /* 1686598ecfbaSChristoph Hellwig * We can end up here with no error and nothing to write only if we race 1687598ecfbaSChristoph Hellwig * with a partial page truncate on a sub-page block sized filesystem. 1688598ecfbaSChristoph Hellwig */ 1689598ecfbaSChristoph Hellwig if (!count) 1690e735c007SMatthew Wilcox (Oracle) folio_end_writeback(folio); 1691598ecfbaSChristoph Hellwig done: 16923d5f3ba1SDarrick J. Wong mapping_set_error(inode->i_mapping, error); 1693598ecfbaSChristoph Hellwig return error; 1694598ecfbaSChristoph Hellwig } 1695598ecfbaSChristoph Hellwig 1696598ecfbaSChristoph Hellwig /* 1697598ecfbaSChristoph Hellwig * Write out a dirty page. 1698598ecfbaSChristoph Hellwig * 1699f1f264b4SAndreas Gruenbacher * For delalloc space on the page, we need to allocate space and flush it. 1700f1f264b4SAndreas Gruenbacher * For unwritten space on the page, we need to start the conversion to 1701598ecfbaSChristoph Hellwig * regular allocated space. 1702598ecfbaSChristoph Hellwig */ 1703d585bdbeSMatthew Wilcox (Oracle) static int iomap_do_writepage(struct folio *folio, 1704d585bdbeSMatthew Wilcox (Oracle) struct writeback_control *wbc, void *data) 1705598ecfbaSChristoph Hellwig { 1706598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc = data; 1707e735c007SMatthew Wilcox (Oracle) struct inode *inode = folio->mapping->host; 170881d4782aSMatthew Wilcox (Oracle) u64 end_pos, isize; 1709598ecfbaSChristoph Hellwig 1710e735c007SMatthew Wilcox (Oracle) trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio)); 1711598ecfbaSChristoph Hellwig 1712598ecfbaSChristoph Hellwig /* 1713e735c007SMatthew Wilcox (Oracle) * Refuse to write the folio out if we're called from reclaim context. 1714598ecfbaSChristoph Hellwig * 1715598ecfbaSChristoph Hellwig * This avoids stack overflows when called from deeply used stacks in 1716598ecfbaSChristoph Hellwig * random callers for direct reclaim or memcg reclaim. We explicitly 1717598ecfbaSChristoph Hellwig * allow reclaim from kswapd as the stack usage there is relatively low. 
1718598ecfbaSChristoph Hellwig * 1719598ecfbaSChristoph Hellwig * This should never happen except in the case of a VM regression so 1720598ecfbaSChristoph Hellwig * warn about it. 1721598ecfbaSChristoph Hellwig */ 1722598ecfbaSChristoph Hellwig if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == 1723598ecfbaSChristoph Hellwig PF_MEMALLOC)) 1724598ecfbaSChristoph Hellwig goto redirty; 1725598ecfbaSChristoph Hellwig 1726598ecfbaSChristoph Hellwig /* 1727e735c007SMatthew Wilcox (Oracle) * Is this folio beyond the end of the file? 1728598ecfbaSChristoph Hellwig * 1729e735c007SMatthew Wilcox (Oracle) * The folio index is less than the end_index, adjust the end_pos 1730e735c007SMatthew Wilcox (Oracle) * to the highest offset that this folio should represent. 1731598ecfbaSChristoph Hellwig * ----------------------------------------------------- 1732598ecfbaSChristoph Hellwig * | file mapping | <EOF> | 1733598ecfbaSChristoph Hellwig * ----------------------------------------------------- 1734598ecfbaSChristoph Hellwig * | Page ... | Page N-2 | Page N-1 | Page N | | 1735598ecfbaSChristoph Hellwig * ^--------------------------------^----------|-------- 1736598ecfbaSChristoph Hellwig * | desired writeback range | see else | 1737598ecfbaSChristoph Hellwig * ---------------------------------^------------------| 1738598ecfbaSChristoph Hellwig */ 173981d4782aSMatthew Wilcox (Oracle) isize = i_size_read(inode); 1740e735c007SMatthew Wilcox (Oracle) end_pos = folio_pos(folio) + folio_size(folio); 174181d4782aSMatthew Wilcox (Oracle) if (end_pos > isize) { 1742598ecfbaSChristoph Hellwig /* 1743598ecfbaSChristoph Hellwig * Check whether the page to write out is beyond or straddles 1744598ecfbaSChristoph Hellwig * i_size or not. 1745598ecfbaSChristoph Hellwig * ------------------------------------------------------- 1746598ecfbaSChristoph Hellwig * | file mapping | <EOF> | 1747598ecfbaSChristoph Hellwig * ------------------------------------------------------- 1748598ecfbaSChristoph Hellwig * | Page ... | Page N-2 | Page N-1 | Page N | Beyond | 1749598ecfbaSChristoph Hellwig * ^--------------------------------^-----------|--------- 1750598ecfbaSChristoph Hellwig * | | Straddles | 1751598ecfbaSChristoph Hellwig * ---------------------------------^-----------|--------| 1752598ecfbaSChristoph Hellwig */ 1753e735c007SMatthew Wilcox (Oracle) size_t poff = offset_in_folio(folio, isize); 175481d4782aSMatthew Wilcox (Oracle) pgoff_t end_index = isize >> PAGE_SHIFT; 1755598ecfbaSChristoph Hellwig 1756598ecfbaSChristoph Hellwig /* 1757d58562caSChris Mason * Skip the page if it's fully outside i_size, e.g. 1758d58562caSChris Mason * due to a truncate operation that's in progress. We've 1759d58562caSChris Mason * cleaned this page and truncate will finish things off for 1760d58562caSChris Mason * us. 1761598ecfbaSChristoph Hellwig * 1762f1f264b4SAndreas Gruenbacher * Note that the end_index is unsigned long. If the given 1763f1f264b4SAndreas Gruenbacher * offset is greater than 16TB on a 32-bit system then if we 1764f1f264b4SAndreas Gruenbacher * checked if the page is fully outside i_size with 1765f1f264b4SAndreas Gruenbacher * "if (page->index >= end_index + 1)", "end_index + 1" would 1766f1f264b4SAndreas Gruenbacher * overflow and evaluate to 0. Hence this page would be 1767f1f264b4SAndreas Gruenbacher * redirtied and written out repeatedly, which would result in 1768f1f264b4SAndreas Gruenbacher * an infinite loop; the user program performing this operation 1769f1f264b4SAndreas Gruenbacher * would hang. 
Instead, we can detect this situation by 1770f1f264b4SAndreas Gruenbacher * checking if the page is totally beyond i_size or if its 1771598ecfbaSChristoph Hellwig * offset is just equal to the EOF. 1772598ecfbaSChristoph Hellwig */ 1773e735c007SMatthew Wilcox (Oracle) if (folio->index > end_index || 1774e735c007SMatthew Wilcox (Oracle) (folio->index == end_index && poff == 0)) 1775d58562caSChris Mason goto unlock; 1776598ecfbaSChristoph Hellwig 1777598ecfbaSChristoph Hellwig /* 1778598ecfbaSChristoph Hellwig * The page straddles i_size. It must be zeroed out on each 1779598ecfbaSChristoph Hellwig * and every writepage invocation because it may be mmapped. 1780598ecfbaSChristoph Hellwig * "A file is mapped in multiples of the page size. For a file 1781598ecfbaSChristoph Hellwig * that is not a multiple of the page size, the remaining 1782598ecfbaSChristoph Hellwig * memory is zeroed when mapped, and writes to that region are 1783598ecfbaSChristoph Hellwig * not written out to the file." 1784598ecfbaSChristoph Hellwig */ 1785e735c007SMatthew Wilcox (Oracle) folio_zero_segment(folio, poff, folio_size(folio)); 178681d4782aSMatthew Wilcox (Oracle) end_pos = isize; 1787598ecfbaSChristoph Hellwig } 1788598ecfbaSChristoph Hellwig 1789e735c007SMatthew Wilcox (Oracle) return iomap_writepage_map(wpc, wbc, inode, folio, end_pos); 1790598ecfbaSChristoph Hellwig 1791598ecfbaSChristoph Hellwig redirty: 1792e735c007SMatthew Wilcox (Oracle) folio_redirty_for_writepage(wbc, folio); 1793d58562caSChris Mason unlock: 1794e735c007SMatthew Wilcox (Oracle) folio_unlock(folio); 1795598ecfbaSChristoph Hellwig return 0; 1796598ecfbaSChristoph Hellwig } 1797598ecfbaSChristoph Hellwig 1798598ecfbaSChristoph Hellwig int 1799598ecfbaSChristoph Hellwig iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, 1800598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc, 1801598ecfbaSChristoph Hellwig const struct iomap_writeback_ops *ops) 1802598ecfbaSChristoph Hellwig { 1803598ecfbaSChristoph Hellwig int ret; 1804598ecfbaSChristoph Hellwig 1805598ecfbaSChristoph Hellwig wpc->ops = ops; 1806598ecfbaSChristoph Hellwig ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc); 1807598ecfbaSChristoph Hellwig if (!wpc->ioend) 1808598ecfbaSChristoph Hellwig return ret; 1809598ecfbaSChristoph Hellwig return iomap_submit_ioend(wpc, wpc->ioend, ret); 1810598ecfbaSChristoph Hellwig } 1811598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_writepages); 1812598ecfbaSChristoph Hellwig 1813598ecfbaSChristoph Hellwig static int __init iomap_init(void) 1814598ecfbaSChristoph Hellwig { 1815598ecfbaSChristoph Hellwig return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE), 1816598ecfbaSChristoph Hellwig offsetof(struct iomap_ioend, io_inline_bio), 1817598ecfbaSChristoph Hellwig BIOSET_NEED_BVECS); 1818598ecfbaSChristoph Hellwig } 1819598ecfbaSChristoph Hellwig fs_initcall(iomap_init); 1820
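/*
 * Editor's illustrative sketch, not part of the original file and compiled
 * out with #if 0: a minimal example of how a filesystem might wire up the
 * two exported interfaces above (iomap_writepages() for writeback and
 * iomap_file_buffered_write_punch_delalloc() from its ->iomap_end method).
 * All "myfs_*" names are hypothetical placeholders; a real filesystem
 * supplies its own extent lookup and delalloc punching helpers.
 */
#if 0
/* Hypothetical filesystem helpers (declarations only, for illustration). */
static int myfs_lookup_extent(struct inode *inode, loff_t offset,
		struct iomap *iomap);
static int myfs_punch_delalloc(struct inode *inode, loff_t pos, loff_t length);

/* Fill wpc->iomap with the extent backing @offset for writeback. */
static int myfs_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset)
{
	return myfs_lookup_extent(inode, offset, &wpc->iomap);
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
	.map_blocks	= myfs_map_blocks,
};

/* ->writepages implementation built on iomap_writepages(). */
static int myfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &myfs_writeback_ops);
}

/*
 * ->iomap_end for buffered writes: after a short write over a freshly
 * allocated delalloc extent, punch out the blocks that were reserved but
 * never dirtied.  The punch callback must only remove delalloc extents in
 * the range it is given, as documented above.
 */
static int myfs_buffered_write_iomap_end(struct inode *inode, loff_t pos,
		loff_t length, ssize_t written, unsigned flags,
		struct iomap *iomap)
{
	return iomap_file_buffered_write_punch_delalloc(inode, iomap, pos,
			length, written, myfs_punch_delalloc);
}
#endif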