// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

#define IOEND_BATCH_SIZE	4096

typedef int (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length);
/*
 * Structure allocated for each folio to track per-block uptodate, dirty state
 * and I/O completions.
 */
struct iomap_folio_state {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		state_lock;

	/*
	 * Each block has two bits in this bitmap:
	 * Bits [0..blocks_per_folio) have the uptodate status.
	 * Bits [blocks_per_folio..2 * blocks_per_folio) have the dirty status.
	 */
	unsigned long		state[];
};

static struct bio_set iomap_ioend_bioset;

static inline bool ifs_is_fully_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs)
{
	struct inode *inode = folio->mapping->host;

	return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
}

static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
		unsigned int block)
{
	return test_bit(block, ifs->state);
}

static void ifs_set_range_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int first_blk = off >> inode->i_blkbits;
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_set(ifs->state, first_blk, nr_blks);
	if (ifs_is_fully_uptodate(folio, ifs))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_set_range_uptodate(struct folio *folio, size_t off,
		size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_set_range_uptodate(folio, ifs, off, len);
	else
		folio_mark_uptodate(folio);
}

static inline bool ifs_block_is_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, int block)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);

	return test_bit(block + blks_per_folio, ifs->state);
}

static void ifs_clear_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_clear_range_dirty(folio, ifs, off, len);
}

static void ifs_set_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_set_range_dirty(folio, ifs, off, len);
}

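/*
 * A minimal sketch of how the helpers above index ifs->state, assuming the
 * two-region layout described in struct iomap_folio_state: with e.g. a 16k
 * folio and 4k blocks, i_blocks_per_folio() is 4, so block 2's uptodate
 * state lives in bit 2 and its dirty state in bit 2 + 4 = 6.
 */
#if 0	/* illustrative sketch only */
static inline unsigned int ifs_uptodate_bit(unsigned int block)
{
	/* uptodate bits occupy [0, blocks_per_folio) */
	return block;
}

static inline unsigned int ifs_dirty_bit(struct inode *inode,
		struct folio *folio, unsigned int block)
{
	/* dirty bits follow, at [blocks_per_folio, 2 * blocks_per_folio) */
	return block + i_blocks_per_folio(inode, folio);
}
#endif
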
static struct iomap_folio_state *ifs_alloc(struct inode *inode,
		struct folio *folio, unsigned int flags)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
	gfp_t gfp;

	if (ifs || nr_blocks <= 1)
		return ifs;

	if (flags & IOMAP_NOWAIT)
		gfp = GFP_NOWAIT;
	else
		gfp = GFP_NOFS | __GFP_NOFAIL;

	/*
	 * ifs->state tracks two sets of state flags when the
	 * filesystem block size is smaller than the folio size.
	 * The first state tracks per-block uptodate and the
	 * second tracks per-block dirty state.
	 */
	ifs = kzalloc(struct_size(ifs, state,
		      BITS_TO_LONGS(2 * nr_blocks)), gfp);
	if (!ifs)
		return ifs;

	spin_lock_init(&ifs->state_lock);
	if (folio_test_uptodate(folio))
		bitmap_set(ifs->state, 0, nr_blocks);
	if (folio_test_dirty(folio))
		bitmap_set(ifs->state, nr_blocks, nr_blocks);
	folio_attach_private(folio, ifs);

	return ifs;
}

static void ifs_free(struct folio *folio)
{
	struct iomap_folio_state *ifs = folio_detach_private(folio);

	if (!ifs)
		return;
	WARN_ON_ONCE(atomic_read(&ifs->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
	WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
			folio_test_uptodate(folio));
	kfree(ifs);
}

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_folio_state *ifs = folio->private;
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (ifs) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!ifs_block_is_uptodate(ifs, i))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (ifs_block_is_uptodate(ifs, i)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct iomap_folio_state *ifs = folio->private;

	if (unlikely(error)) {
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		iomap_set_range_uptodate(folio, offset, len);
	}

	if (!ifs || atomic_sub_and_test(len, &ifs->read_bytes_pending))
		folio_unlock(folio);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	size_t offset = offset_in_folio(folio, iomap->offset);
	void *addr;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			 offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		ifs_alloc(iter->inode, folio, iter->flags);

	addr = kmap_local_folio(folio, offset);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(folio, offset, PAGE_SIZE - poff);
	return 0;
}

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct folio *folio = ctx->cur_folio;
	struct iomap_folio_state *ifs;
	loff_t orig_pos = pos;
	size_t poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, folio);

	/* zero post-eof blocks as the page may be mapped */
	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (ifs)
		atomic_add(plen, &ifs->read_bytes_pending);

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
				     REQ_OP_READ, gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!ctx->bio) {
			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
					     orig_gfp);
		}
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio_nofail(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		folio_set_error(folio);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_folio, we always
	 * return 0 and just set the folio error flag on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_read_folio);

static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);

/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	unsigned first, last, i;

	if (!ifs)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	for (i = first; i <= last; i++)
		if (!ifs_block_is_uptodate(ifs, i))
			return false;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

/**
 * iomap_get_folio - get a folio reference for writing
 * @iter: iteration structure
 * @pos: start offset of write
 * @len: Suggested size of folio to create.
 *
 * Returns a locked reference to the folio at @pos, or an error pointer if the
 * folio could not be obtained.
 */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
{
	fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;

	if (iter->flags & IOMAP_NOWAIT)
		fgp |= FGP_NOWAIT;
	fgp |= fgf_set_order(len);

	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
}
EXPORT_SYMBOL_GPL(iomap_get_folio);

bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * If the folio is dirty, we refuse to release our metadata because
	 * it may be partially dirty.  Once we track per-block dirty state,
	 * we can release the metadata if every block is dirty.
	 */
	if (folio_test_dirty(folio))
		return false;
	ifs_free(folio);
	return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
			folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		ifs_free(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	size_t len = folio_size(folio);

	ifs_alloc(inode, folio, 0);
	iomap_set_range_dirty(folio, 0, len);
	return filemap_dirty_folio(mapping, folio);
}
EXPORT_SYMBOL_GPL(iomap_dirty_folio);

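/*
 * A minimal sketch of how a filesystem typically wires the exported helpers
 * above into its address_space_operations.  "example_iomap_ops",
 * "example_read_folio", "example_readahead" and "example_aops" are
 * hypothetical names used only for illustration; the iomap_* callbacks are
 * the functions defined in this file.
 */
#if 0	/* illustrative sketch only */
static const struct iomap_ops example_iomap_ops;	/* filesystem-provided */

static int example_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &example_iomap_ops);
}

static void example_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &example_iomap_ops);
}

static const struct address_space_operations example_aops = {
	.read_folio		= example_read_folio,
	.readahead		= example_readahead,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.dirty_folio		= iomap_dirty_folio,
};
#endif
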
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
					 pos + len - 1);
}

static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_add_folio_nofail(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}

static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_folio_state *ifs;
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	/*
	 * If the write or zeroing completely overlaps the current folio, then
	 * the entire folio will be dirtied so there is no need for
	 * per-block state tracking structures to be attached to this folio.
	 * For the unshare case, we must read in the ondisk contents because we
	 * are not changing pagecache contents.
	 */
	if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
	    pos + len >= folio_pos(folio) + folio_size(folio))
		return 0;

	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
		return -EAGAIN;

	if (folio_test_uptodate(folio))
		return 0;
	folio_clear_error(folio);

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status;

			if (iter->flags & IOMAP_NOWAIT)
				return -EAGAIN;

			status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
		size_t len)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->get_folio)
		return folio_ops->get_folio(iter, pos, len);
	else
		return iomap_get_folio(iter, pos, len);
}

static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
		struct folio *folio)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->put_folio) {
		folio_ops->put_folio(iter->inode, pos, ret, folio);
	} else {
		folio_unlock(folio);
		folio_put(folio);
	}
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	folio = __iomap_get_folio(iter, pos, len);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/*
	 * Now we have a locked folio, before we do anything with it we need to
	 * check that the iomap we have cached is not stale. The inode extent
	 * mapping can change due to concurrent IO in flight (e.g.
	 * IOMAP_UNWRITTEN state can change and memory reclaim could have
	 * reclaimed a previously partially written page at this index after IO
	 * completion before this write reaches this file offset) and hence we
	 * could do the wrong thing here (zero a page range incorrectly or fail
	 * to zero) and corrupt data.
	 */
	if (folio_ops && folio_ops->iomap_valid) {
		bool iomap_valid = folio_ops->iomap_valid(iter->inode,
							  &iter->iomap);
		if (!iomap_valid) {
			iter->iomap.flags |= IOMAP_F_STALE;
			status = 0;
			goto out_unlock;
		}
	}

	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	__iomap_put_folio(iter, pos, 0, folio);
	iomap_write_failed(iter->inode, pos, len);

	return status;
}

static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return 0;
	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
	iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
	filemap_dirty_folio(inode->i_mapping, folio);
	return copied;
}

static size_t iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, folio, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, &folio->page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	__iomap_put_folio(iter, pos, ret, folio);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (ret < len)
		iomap_write_failed(iter->inode, pos + ret, len - ret);
	return ret;
}

static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;
	struct address_space *mapping = iter->inode->i_mapping;
	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;

	do {
		struct folio *folio;
		size_t offset;		/* Offset into folio */
		size_t bytes;		/* Bytes to write to folio */
		size_t copied;		/* Bytes copied from user */

		offset = pos & (chunk - 1);
		bytes = min(chunk - offset, iov_iter_count(i));
		status = balance_dirty_pages_ratelimited_flags(mapping,
							       bdp_flags);
		if (unlikely(status))
			break;

		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * For async buffered writes the assumption is that the user
		 * page has already been faulted in. This can be optimized by
		 * faulting the user page.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			break;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		status = iomap_write_end(iter, pos, bytes, copied, folio);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			if (chunk > PAGE_SIZE)
				chunk /= 2;
		} else {
			pos += status;
			written += status;
			length -= status;
		}
	} while (iov_iter_count(i) && length);

	if (status == -EAGAIN) {
		iov_iter_revert(i, written);
		return -EAGAIN;
	}
	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
	};
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iter.flags |= IOMAP_NOWAIT;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);

	if (unlikely(iter.pos == iocb->ki_pos))
		return ret;
	ret = iter.pos - iocb->ki_pos;
	iocb->ki_pos = iter.pos;
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

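/*
 * A minimal sketch of a caller, assuming a filesystem ->write_iter
 * implementation.  "example_iomap_ops" and "example_file_write_iter" are
 * hypothetical names used only for illustration; locking and post-write
 * handling are filesystem specific and simplified here.
 */
#if 0	/* illustrative sketch only */
static ssize_t example_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &example_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif
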
9954ce02c67SRitesh Harjani (IBM) */ 9964ce02c67SRitesh Harjani (IBM) ifs = folio->private; 9974ce02c67SRitesh Harjani (IBM) if (!ifs) 9984ce02c67SRitesh Harjani (IBM) return ret; 9994ce02c67SRitesh Harjani (IBM) 10004ce02c67SRitesh Harjani (IBM) last_byte = min_t(loff_t, end_byte - 1, 10014ce02c67SRitesh Harjani (IBM) folio_pos(folio) + folio_size(folio) - 1); 10024ce02c67SRitesh Harjani (IBM) first_blk = offset_in_folio(folio, start_byte) >> blkbits; 10034ce02c67SRitesh Harjani (IBM) last_blk = offset_in_folio(folio, last_byte) >> blkbits; 10044ce02c67SRitesh Harjani (IBM) for (i = first_blk; i <= last_blk; i++) { 10054ce02c67SRitesh Harjani (IBM) if (!ifs_block_is_dirty(folio, ifs, i)) { 10064ce02c67SRitesh Harjani (IBM) ret = punch(inode, folio_pos(folio) + (i << blkbits), 10074ce02c67SRitesh Harjani (IBM) 1 << blkbits); 10084ce02c67SRitesh Harjani (IBM) if (ret) 10094ce02c67SRitesh Harjani (IBM) return ret; 10104ce02c67SRitesh Harjani (IBM) } 10114ce02c67SRitesh Harjani (IBM) } 10124ce02c67SRitesh Harjani (IBM) 10134ce02c67SRitesh Harjani (IBM) return ret; 10144ce02c67SRitesh Harjani (IBM) } 10154ce02c67SRitesh Harjani (IBM) 10164ce02c67SRitesh Harjani (IBM) 10177f79d85bSRitesh Harjani (IBM) static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio, 10187f79d85bSRitesh Harjani (IBM) loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte, 10197f79d85bSRitesh Harjani (IBM) iomap_punch_t punch) 10207f79d85bSRitesh Harjani (IBM) { 10217f79d85bSRitesh Harjani (IBM) int ret = 0; 10227f79d85bSRitesh Harjani (IBM) 10237f79d85bSRitesh Harjani (IBM) if (!folio_test_dirty(folio)) 10247f79d85bSRitesh Harjani (IBM) return ret; 10257f79d85bSRitesh Harjani (IBM) 10267f79d85bSRitesh Harjani (IBM) /* if dirty, punch up to offset */ 10277f79d85bSRitesh Harjani (IBM) if (start_byte > *punch_start_byte) { 10287f79d85bSRitesh Harjani (IBM) ret = punch(inode, *punch_start_byte, 10297f79d85bSRitesh Harjani (IBM) start_byte - *punch_start_byte); 10307f79d85bSRitesh Harjani (IBM) if (ret) 10317f79d85bSRitesh Harjani (IBM) return ret; 10327f79d85bSRitesh Harjani (IBM) } 10337f79d85bSRitesh Harjani (IBM) 10344ce02c67SRitesh Harjani (IBM) /* Punch non-dirty blocks within folio */ 10354ce02c67SRitesh Harjani (IBM) ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte, 10364ce02c67SRitesh Harjani (IBM) end_byte, punch); 10374ce02c67SRitesh Harjani (IBM) if (ret) 10384ce02c67SRitesh Harjani (IBM) return ret; 10394ce02c67SRitesh Harjani (IBM) 10407f79d85bSRitesh Harjani (IBM) /* 10417f79d85bSRitesh Harjani (IBM) * Make sure the next punch start is correctly bound to 10427f79d85bSRitesh Harjani (IBM) * the end of this data range, not the end of the folio. 10437f79d85bSRitesh Harjani (IBM) */ 10447f79d85bSRitesh Harjani (IBM) *punch_start_byte = min_t(loff_t, end_byte, 10457f79d85bSRitesh Harjani (IBM) folio_pos(folio) + folio_size(folio)); 10467f79d85bSRitesh Harjani (IBM) 10477f79d85bSRitesh Harjani (IBM) return ret; 10487f79d85bSRitesh Harjani (IBM) } 10497f79d85bSRitesh Harjani (IBM) 10509c7babf9SDave Chinner /* 1051f43dc4dcSDave Chinner * Scan the data range passed to us for dirty page cache folios. If we find a 1052f43dc4dcSDave Chinner * dirty folio, punch out the preceding range and update the offset from which 1053f43dc4dcSDave Chinner * the next punch will start.
1054f43dc4dcSDave Chinner * 1055f43dc4dcSDave Chinner * We can punch out storage reservations under clean pages because they either 1056f43dc4dcSDave Chinner * contain data that has been written back - in which case the delalloc punch 1057f43dc4dcSDave Chinner * over that range is a no-op - or they were populated by read faults, in which case they 1058f43dc4dcSDave Chinner * contain zeroes and we can remove the delalloc backing range and any new 1059f43dc4dcSDave Chinner * writes to those pages will do the normal hole filling operation... 1060f43dc4dcSDave Chinner * 1061f43dc4dcSDave Chinner * This makes the logic simple: we only need to keep the delalloc extents 1062f43dc4dcSDave Chinner * over the dirty ranges of the page cache. 1063f43dc4dcSDave Chinner * 1064f43dc4dcSDave Chinner * This function uses [start_byte, end_byte) intervals (i.e. open ended) to 1065f43dc4dcSDave Chinner * simplify range iterations. 1066f43dc4dcSDave Chinner */ 1067f43dc4dcSDave Chinner static int iomap_write_delalloc_scan(struct inode *inode, 1068f43dc4dcSDave Chinner loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte, 10690af2b37dSRitesh Harjani (IBM) iomap_punch_t punch) 1070f43dc4dcSDave Chinner { 1071f43dc4dcSDave Chinner while (start_byte < end_byte) { 1072f43dc4dcSDave Chinner struct folio *folio; 10737f79d85bSRitesh Harjani (IBM) int ret; 1074f43dc4dcSDave Chinner 1075f43dc4dcSDave Chinner /* grab locked page */ 1076f43dc4dcSDave Chinner folio = filemap_lock_folio(inode->i_mapping, 1077f43dc4dcSDave Chinner start_byte >> PAGE_SHIFT); 107866dabbb6SChristoph Hellwig if (IS_ERR(folio)) { 1079f43dc4dcSDave Chinner start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) + 1080f43dc4dcSDave Chinner PAGE_SIZE; 1081f43dc4dcSDave Chinner continue; 1082f43dc4dcSDave Chinner } 1083f43dc4dcSDave Chinner 10847f79d85bSRitesh Harjani (IBM) ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte, 10857f79d85bSRitesh Harjani (IBM) start_byte, end_byte, punch); 10867f79d85bSRitesh Harjani (IBM) if (ret) { 1087f43dc4dcSDave Chinner folio_unlock(folio); 1088f43dc4dcSDave Chinner folio_put(folio); 10897f79d85bSRitesh Harjani (IBM) return ret; 1090f43dc4dcSDave Chinner } 1091f43dc4dcSDave Chinner 1092f43dc4dcSDave Chinner /* move offset to start of next folio in range */ 1093f43dc4dcSDave Chinner start_byte = folio_next_index(folio) << PAGE_SHIFT; 1094f43dc4dcSDave Chinner folio_unlock(folio); 1095f43dc4dcSDave Chinner folio_put(folio); 1096f43dc4dcSDave Chinner } 1097f43dc4dcSDave Chinner return 0; 1098f43dc4dcSDave Chinner } 1099f43dc4dcSDave Chinner 1100f43dc4dcSDave Chinner /* 1101f43dc4dcSDave Chinner * Punch out all the delalloc blocks in the range given except for those that 1102f43dc4dcSDave Chinner * have dirty data still pending in the page cache - those are going to be 1103f43dc4dcSDave Chinner * written and so must still retain the delalloc backing for writeback. 1104f43dc4dcSDave Chinner * 1105f43dc4dcSDave Chinner * As we are scanning the page cache for data, we don't need to reimplement the 1106f43dc4dcSDave Chinner * wheel - mapping_seek_hole_data() does exactly what we need to identify the 1107f43dc4dcSDave Chinner * start and end of data ranges correctly even for sub-folio block sizes.
This 1108f43dc4dcSDave Chinner * byte range based iteration is especially convenient because it means we 1109f43dc4dcSDave Chinner * don't have to care about variable size folios, nor where the start or end of 1110f43dc4dcSDave Chinner * the data range lies within a folio, if they lie within the same folio or even 1111f43dc4dcSDave Chinner * if there are multiple discontiguous data ranges within the folio. 1112f43dc4dcSDave Chinner * 1113f43dc4dcSDave Chinner * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so 1114f43dc4dcSDave Chinner * can return data ranges that exist in the cache beyond EOF. e.g. a page fault 1115f43dc4dcSDave Chinner * spanning EOF will initialise the post-EOF data to zeroes and mark it up to 1116f43dc4dcSDave Chinner * date. A write page fault can then mark it dirty. If we then fail a write() 1117f43dc4dcSDave Chinner * beyond EOF into that up to date cached range, we allocate a delalloc block 1118f43dc4dcSDave Chinner * beyond EOF and then have to punch it out. Because the range is up to date, 1119f43dc4dcSDave Chinner * mapping_seek_hole_data() will return it, and we will skip the punch because 1120f43dc4dcSDave Chinner * the folio is dirty. This is incorrect - we always need to punch out delalloc 1121f43dc4dcSDave Chinner * beyond EOF in this case as writeback will never write back and convert that 1122f43dc4dcSDave Chinner * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF, 1123f43dc4dcSDave Chinner * resulting in always punching out the range from the EOF to the end of the 1124f43dc4dcSDave Chinner * range the iomap spans. 1125f43dc4dcSDave Chinner * 1126f43dc4dcSDave Chinner * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it 1127f43dc4dcSDave Chinner * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA 1128f43dc4dcSDave Chinner * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte) 1129f43dc4dcSDave Chinner * returns the end of the data range (data_end). Using closed intervals would 1130f43dc4dcSDave Chinner * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose 1131f43dc4dcSDave Chinner * the code to subtle off-by-one bugs.... 1132f43dc4dcSDave Chinner */ 1133f43dc4dcSDave Chinner static int iomap_write_delalloc_release(struct inode *inode, 11340af2b37dSRitesh Harjani (IBM) loff_t start_byte, loff_t end_byte, iomap_punch_t punch) 1135f43dc4dcSDave Chinner { 1136f43dc4dcSDave Chinner loff_t punch_start_byte = start_byte; 1137f43dc4dcSDave Chinner loff_t scan_end_byte = min(i_size_read(inode), end_byte); 1138f43dc4dcSDave Chinner int error = 0; 1139f43dc4dcSDave Chinner 1140f43dc4dcSDave Chinner /* 1141f43dc4dcSDave Chinner * Lock the mapping to avoid races with page faults re-instantiating 1142f43dc4dcSDave Chinner * folios and dirtying them via ->page_mkwrite whilst we walk the 1143f43dc4dcSDave Chinner * cache and perform delalloc extent removal. Failing to do this can 1144f43dc4dcSDave Chinner * leave dirty pages with no space reservation in the cache.
1145f43dc4dcSDave Chinner */ 1146f43dc4dcSDave Chinner filemap_invalidate_lock(inode->i_mapping); 1147f43dc4dcSDave Chinner while (start_byte < scan_end_byte) { 1148f43dc4dcSDave Chinner loff_t data_end; 1149f43dc4dcSDave Chinner 1150f43dc4dcSDave Chinner start_byte = mapping_seek_hole_data(inode->i_mapping, 1151f43dc4dcSDave Chinner start_byte, scan_end_byte, SEEK_DATA); 1152f43dc4dcSDave Chinner /* 1153f43dc4dcSDave Chinner * If there is no more data to scan, all that is left is to 1154f43dc4dcSDave Chinner * punch out the remaining range. 1155f43dc4dcSDave Chinner */ 1156f43dc4dcSDave Chinner if (start_byte == -ENXIO || start_byte == scan_end_byte) 1157f43dc4dcSDave Chinner break; 1158f43dc4dcSDave Chinner if (start_byte < 0) { 1159f43dc4dcSDave Chinner error = start_byte; 1160f43dc4dcSDave Chinner goto out_unlock; 1161f43dc4dcSDave Chinner } 1162f43dc4dcSDave Chinner WARN_ON_ONCE(start_byte < punch_start_byte); 1163f43dc4dcSDave Chinner WARN_ON_ONCE(start_byte > scan_end_byte); 1164f43dc4dcSDave Chinner 1165f43dc4dcSDave Chinner /* 1166f43dc4dcSDave Chinner * We find the end of this contiguous cached data range by 1167f43dc4dcSDave Chinner * seeking from start_byte to the beginning of the next hole. 1168f43dc4dcSDave Chinner */ 1169f43dc4dcSDave Chinner data_end = mapping_seek_hole_data(inode->i_mapping, start_byte, 1170f43dc4dcSDave Chinner scan_end_byte, SEEK_HOLE); 1171f43dc4dcSDave Chinner if (data_end < 0) { 1172f43dc4dcSDave Chinner error = data_end; 1173f43dc4dcSDave Chinner goto out_unlock; 1174f43dc4dcSDave Chinner } 1175f43dc4dcSDave Chinner WARN_ON_ONCE(data_end <= start_byte); 1176f43dc4dcSDave Chinner WARN_ON_ONCE(data_end > scan_end_byte); 1177f43dc4dcSDave Chinner 1178f43dc4dcSDave Chinner error = iomap_write_delalloc_scan(inode, &punch_start_byte, 1179f43dc4dcSDave Chinner start_byte, data_end, punch); 1180f43dc4dcSDave Chinner if (error) 1181f43dc4dcSDave Chinner goto out_unlock; 1182f43dc4dcSDave Chinner 1183f43dc4dcSDave Chinner /* The next data search starts at the end of this one. */ 1184f43dc4dcSDave Chinner start_byte = data_end; 1185f43dc4dcSDave Chinner } 1186f43dc4dcSDave Chinner 1187f43dc4dcSDave Chinner if (punch_start_byte < end_byte) 1188f43dc4dcSDave Chinner error = punch(inode, punch_start_byte, 1189f43dc4dcSDave Chinner end_byte - punch_start_byte); 1190f43dc4dcSDave Chinner out_unlock: 1191f43dc4dcSDave Chinner filemap_invalidate_unlock(inode->i_mapping); 1192f43dc4dcSDave Chinner return error; 1193f43dc4dcSDave Chinner } 1194f43dc4dcSDave Chinner 1195f43dc4dcSDave Chinner /* 11969c7babf9SDave Chinner * When a short write occurs, the filesystem may need to remove reserved space 11979c7babf9SDave Chinner * that was allocated in ->iomap_begin from its ->iomap_end method. For 11989c7babf9SDave Chinner * filesystems that use delayed allocation, we need to punch out delalloc 11999c7babf9SDave Chinner * extents from the range that are not dirty in the page cache. As the write can 12009c7babf9SDave Chinner * race with page faults, there can be dirty pages over the delalloc extent 12019c7babf9SDave Chinner * outside the range of a short write but still within the delalloc extent 12029c7babf9SDave Chinner * allocated for this iomap. 12039c7babf9SDave Chinner * 12049c7babf9SDave Chinner * This function uses [start_byte, end_byte) intervals (i.e. open ended) to 1205f43dc4dcSDave Chinner * simplify range iterations.
1206f43dc4dcSDave Chinner * 1207f43dc4dcSDave Chinner * The punch() callback *must* only punch delalloc extents in the range passed 1208f43dc4dcSDave Chinner * to it. It must skip over all other types of extents in the range and leave 1209f43dc4dcSDave Chinner * them completely unchanged. It must do this punch atomically with respect to 1210f43dc4dcSDave Chinner * other extent modifications. 1211f43dc4dcSDave Chinner * 1212f43dc4dcSDave Chinner * The punch() callback may be called with a folio locked to prevent writeback 1213f43dc4dcSDave Chinner * extent allocation racing at the edge of the range we are currently punching. 1214f43dc4dcSDave Chinner * The locked folio may or may not cover the range being punched, so it is not 1215f43dc4dcSDave Chinner * safe for the punch() callback to lock folios itself. 1216f43dc4dcSDave Chinner * 1217f43dc4dcSDave Chinner * Lock order is: 1218f43dc4dcSDave Chinner * 1219f43dc4dcSDave Chinner * inode->i_rwsem (shared or exclusive) 1220f43dc4dcSDave Chinner * inode->i_mapping->invalidate_lock (exclusive) 1221f43dc4dcSDave Chinner * folio_lock() 1222f43dc4dcSDave Chinner * ->punch 1223f43dc4dcSDave Chinner * internal filesystem allocation lock 12249c7babf9SDave Chinner */ 12259c7babf9SDave Chinner int iomap_file_buffered_write_punch_delalloc(struct inode *inode, 12269c7babf9SDave Chinner struct iomap *iomap, loff_t pos, loff_t length, 12270af2b37dSRitesh Harjani (IBM) ssize_t written, iomap_punch_t punch) 12289c7babf9SDave Chinner { 12299c7babf9SDave Chinner loff_t start_byte; 12309c7babf9SDave Chinner loff_t end_byte; 1231302efbefSLu Hongfei unsigned int blocksize = i_blocksize(inode); 12329c7babf9SDave Chinner 12339c7babf9SDave Chinner if (iomap->type != IOMAP_DELALLOC) 12349c7babf9SDave Chinner return 0; 12359c7babf9SDave Chinner 12369c7babf9SDave Chinner /* If we didn't reserve the blocks, we're not allowed to punch them. */ 12379c7babf9SDave Chinner if (!(iomap->flags & IOMAP_F_NEW)) 12389c7babf9SDave Chinner return 0; 12399c7babf9SDave Chinner 12409c7babf9SDave Chinner /* 12419c7babf9SDave Chinner * start_byte refers to the first unused block after a short write. If 12429c7babf9SDave Chinner * nothing was written, round offset down to point at the first block in 12439c7babf9SDave Chinner * the range. 12449c7babf9SDave Chinner */ 12459c7babf9SDave Chinner if (unlikely(!written)) 12469c7babf9SDave Chinner start_byte = round_down(pos, blocksize); 12479c7babf9SDave Chinner else 12489c7babf9SDave Chinner start_byte = round_up(pos + written, blocksize); 12499c7babf9SDave Chinner end_byte = round_up(pos + length, blocksize); 12509c7babf9SDave Chinner 12519c7babf9SDave Chinner /* Nothing to do if we've written the entire delalloc extent */ 12529c7babf9SDave Chinner if (start_byte >= end_byte) 12539c7babf9SDave Chinner return 0; 12549c7babf9SDave Chinner 1255f43dc4dcSDave Chinner return iomap_write_delalloc_release(inode, start_byte, end_byte, 1256f43dc4dcSDave Chinner punch); 12579c7babf9SDave Chinner } 12589c7babf9SDave Chinner EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc); 12599c7babf9SDave Chinner 12608fc274d1SChristoph Hellwig static loff_t iomap_unshare_iter(struct iomap_iter *iter) 1261afc51aaaSDarrick J. 
Wong { 12628fc274d1SChristoph Hellwig struct iomap *iomap = &iter->iomap; 1263fad0a1abSChristoph Hellwig const struct iomap *srcmap = iomap_iter_srcmap(iter); 12648fc274d1SChristoph Hellwig loff_t pos = iter->pos; 12658fc274d1SChristoph Hellwig loff_t length = iomap_length(iter); 1266d4ff3b2eSMatthew Wilcox (Oracle) loff_t written = 0; 1267afc51aaaSDarrick J. Wong 12683590c4d8SChristoph Hellwig /* don't bother with blocks that are not shared to start with */ 12693590c4d8SChristoph Hellwig if (!(iomap->flags & IOMAP_F_SHARED)) 12703590c4d8SChristoph Hellwig return length; 12713590c4d8SChristoph Hellwig /* don't bother with holes or unwritten extents */ 1272c039b997SGoldwyn Rodrigues if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) 12733590c4d8SChristoph Hellwig return length; 12743590c4d8SChristoph Hellwig 1275afc51aaaSDarrick J. Wong do { 1276bc6123a8SMatthew Wilcox (Oracle) struct folio *folio; 1277*a5f31a50SDarrick J. Wong int status; 1278*a5f31a50SDarrick J. Wong size_t offset; 1279*a5f31a50SDarrick J. Wong size_t bytes = min_t(u64, SIZE_MAX, length); 1280afc51aaaSDarrick J. Wong 1281bc6123a8SMatthew Wilcox (Oracle) status = iomap_write_begin(iter, pos, bytes, &folio); 1282afc51aaaSDarrick J. Wong if (unlikely(status)) 1283afc51aaaSDarrick J. Wong return status; 1284*a5f31a50SDarrick J. Wong if (iomap->flags & IOMAP_F_STALE) 1285d7b64041SDave Chinner break; 1286afc51aaaSDarrick J. Wong 1287*a5f31a50SDarrick J. Wong offset = offset_in_folio(folio, pos); 1288*a5f31a50SDarrick J. Wong if (bytes > folio_size(folio) - offset) 1289*a5f31a50SDarrick J. Wong bytes = folio_size(folio) - offset; 1290*a5f31a50SDarrick J. Wong 1291*a5f31a50SDarrick J. Wong bytes = iomap_write_end(iter, pos, bytes, bytes, folio); 1292*a5f31a50SDarrick J. Wong if (WARN_ON_ONCE(bytes == 0)) 1293afc51aaaSDarrick J. Wong return -EIO; 1294afc51aaaSDarrick J. Wong 1295afc51aaaSDarrick J. Wong cond_resched(); 1296afc51aaaSDarrick J. Wong 1297*a5f31a50SDarrick J. Wong pos += bytes; 1298*a5f31a50SDarrick J. Wong written += bytes; 1299*a5f31a50SDarrick J. Wong length -= bytes; 1300afc51aaaSDarrick J. Wong 13018fc274d1SChristoph Hellwig balance_dirty_pages_ratelimited(iter->inode->i_mapping); 1302*a5f31a50SDarrick J. Wong } while (length > 0); 1303afc51aaaSDarrick J. Wong 1304afc51aaaSDarrick J. Wong return written; 1305afc51aaaSDarrick J. Wong } 1306afc51aaaSDarrick J. Wong 1307afc51aaaSDarrick J. Wong int 13083590c4d8SChristoph Hellwig iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, 1309afc51aaaSDarrick J. Wong const struct iomap_ops *ops) 1310afc51aaaSDarrick J. Wong { 13118fc274d1SChristoph Hellwig struct iomap_iter iter = { 13128fc274d1SChristoph Hellwig .inode = inode, 13138fc274d1SChristoph Hellwig .pos = pos, 13148fc274d1SChristoph Hellwig .len = len, 1315b74b1293SChristoph Hellwig .flags = IOMAP_WRITE | IOMAP_UNSHARE, 13168fc274d1SChristoph Hellwig }; 13178fc274d1SChristoph Hellwig int ret; 1318afc51aaaSDarrick J. Wong 13198fc274d1SChristoph Hellwig while ((ret = iomap_iter(&iter, ops)) > 0) 13208fc274d1SChristoph Hellwig iter.processed = iomap_unshare_iter(&iter); 1321afc51aaaSDarrick J. Wong return ret; 1322afc51aaaSDarrick J. Wong } 13233590c4d8SChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_file_unshare); 1324afc51aaaSDarrick J. Wong 13252aa3048eSChristoph Hellwig static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) 1326afc51aaaSDarrick J. 
Wong { 1327fad0a1abSChristoph Hellwig const struct iomap *srcmap = iomap_iter_srcmap(iter); 13282aa3048eSChristoph Hellwig loff_t pos = iter->pos; 13292aa3048eSChristoph Hellwig loff_t length = iomap_length(iter); 1330afc51aaaSDarrick J. Wong loff_t written = 0; 1331afc51aaaSDarrick J. Wong 1332afc51aaaSDarrick J. Wong /* already zeroed? we're done. */ 1333c039b997SGoldwyn Rodrigues if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) 133481ee8e52SMatthew Wilcox (Oracle) return length; 1335afc51aaaSDarrick J. Wong 1336afc51aaaSDarrick J. Wong do { 13374d7bd0ebSMatthew Wilcox (Oracle) struct folio *folio; 13384d7bd0ebSMatthew Wilcox (Oracle) int status; 13394d7bd0ebSMatthew Wilcox (Oracle) size_t offset; 13404d7bd0ebSMatthew Wilcox (Oracle) size_t bytes = min_t(u64, SIZE_MAX, length); 1341afc51aaaSDarrick J. Wong 13424d7bd0ebSMatthew Wilcox (Oracle) status = iomap_write_begin(iter, pos, bytes, &folio); 13434d7bd0ebSMatthew Wilcox (Oracle) if (status) 13444d7bd0ebSMatthew Wilcox (Oracle) return status; 1345d7b64041SDave Chinner if (iter->iomap.flags & IOMAP_F_STALE) 1346d7b64041SDave Chinner break; 13474d7bd0ebSMatthew Wilcox (Oracle) 13484d7bd0ebSMatthew Wilcox (Oracle) offset = offset_in_folio(folio, pos); 13494d7bd0ebSMatthew Wilcox (Oracle) if (bytes > folio_size(folio) - offset) 13504d7bd0ebSMatthew Wilcox (Oracle) bytes = folio_size(folio) - offset; 13514d7bd0ebSMatthew Wilcox (Oracle) 13524d7bd0ebSMatthew Wilcox (Oracle) folio_zero_range(folio, offset, bytes); 13534d7bd0ebSMatthew Wilcox (Oracle) folio_mark_accessed(folio); 13544d7bd0ebSMatthew Wilcox (Oracle) 13554d7bd0ebSMatthew Wilcox (Oracle) bytes = iomap_write_end(iter, pos, bytes, bytes, folio); 13564d7bd0ebSMatthew Wilcox (Oracle) if (WARN_ON_ONCE(bytes == 0)) 13574d7bd0ebSMatthew Wilcox (Oracle) return -EIO; 1358afc51aaaSDarrick J. Wong 1359afc51aaaSDarrick J. Wong pos += bytes; 136081ee8e52SMatthew Wilcox (Oracle) length -= bytes; 1361afc51aaaSDarrick J. Wong written += bytes; 136281ee8e52SMatthew Wilcox (Oracle) } while (length > 0); 1363afc51aaaSDarrick J. Wong 136498eb8d95SKaixu Xia if (did_zero) 136598eb8d95SKaixu Xia *did_zero = true; 1366afc51aaaSDarrick J. Wong return written; 1367afc51aaaSDarrick J. Wong } 1368afc51aaaSDarrick J. Wong 1369afc51aaaSDarrick J. Wong int 1370afc51aaaSDarrick J. Wong iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, 1371afc51aaaSDarrick J. Wong const struct iomap_ops *ops) 1372afc51aaaSDarrick J. Wong { 13732aa3048eSChristoph Hellwig struct iomap_iter iter = { 13742aa3048eSChristoph Hellwig .inode = inode, 13752aa3048eSChristoph Hellwig .pos = pos, 13762aa3048eSChristoph Hellwig .len = len, 13772aa3048eSChristoph Hellwig .flags = IOMAP_ZERO, 13782aa3048eSChristoph Hellwig }; 13792aa3048eSChristoph Hellwig int ret; 1380afc51aaaSDarrick J. Wong 13812aa3048eSChristoph Hellwig while ((ret = iomap_iter(&iter, ops)) > 0) 13822aa3048eSChristoph Hellwig iter.processed = iomap_zero_iter(&iter, did_zero); 1383afc51aaaSDarrick J. Wong return ret; 1384afc51aaaSDarrick J. Wong } 1385afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_zero_range); 1386afc51aaaSDarrick J. Wong 1387afc51aaaSDarrick J. Wong int 1388afc51aaaSDarrick J. Wong iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, 1389afc51aaaSDarrick J. Wong const struct iomap_ops *ops) 1390afc51aaaSDarrick J. Wong { 1391afc51aaaSDarrick J. Wong unsigned int blocksize = i_blocksize(inode); 1392afc51aaaSDarrick J. Wong unsigned int off = pos & (blocksize - 1); 1393afc51aaaSDarrick J. 
Wong 1394afc51aaaSDarrick J. Wong /* Block boundary? Nothing to do */ 1395afc51aaaSDarrick J. Wong if (!off) 1396afc51aaaSDarrick J. Wong return 0; 1397afc51aaaSDarrick J. Wong return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops); 1398afc51aaaSDarrick J. Wong } 1399afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_truncate_page); 1400afc51aaaSDarrick J. Wong 1401ea0f843aSMatthew Wilcox (Oracle) static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter, 1402ea0f843aSMatthew Wilcox (Oracle) struct folio *folio) 1403afc51aaaSDarrick J. Wong { 1404253564baSChristoph Hellwig loff_t length = iomap_length(iter); 1405afc51aaaSDarrick J. Wong int ret; 1406afc51aaaSDarrick J. Wong 1407253564baSChristoph Hellwig if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) { 1408d1bd0b4eSMatthew Wilcox (Oracle) ret = __block_write_begin_int(folio, iter->pos, length, NULL, 1409253564baSChristoph Hellwig &iter->iomap); 1410afc51aaaSDarrick J. Wong if (ret) 1411afc51aaaSDarrick J. Wong return ret; 1412ea0f843aSMatthew Wilcox (Oracle) block_commit_write(&folio->page, 0, length); 1413afc51aaaSDarrick J. Wong } else { 1414ea0f843aSMatthew Wilcox (Oracle) WARN_ON_ONCE(!folio_test_uptodate(folio)); 1415ea0f843aSMatthew Wilcox (Oracle) folio_mark_dirty(folio); 1416afc51aaaSDarrick J. Wong } 1417afc51aaaSDarrick J. Wong 1418afc51aaaSDarrick J. Wong return length; 1419afc51aaaSDarrick J. Wong } 1420afc51aaaSDarrick J. Wong 1421afc51aaaSDarrick J. Wong vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) 1422afc51aaaSDarrick J. Wong { 1423253564baSChristoph Hellwig struct iomap_iter iter = { 1424253564baSChristoph Hellwig .inode = file_inode(vmf->vma->vm_file), 1425253564baSChristoph Hellwig .flags = IOMAP_WRITE | IOMAP_FAULT, 1426253564baSChristoph Hellwig }; 1427ea0f843aSMatthew Wilcox (Oracle) struct folio *folio = page_folio(vmf->page); 1428afc51aaaSDarrick J. Wong ssize_t ret; 1429afc51aaaSDarrick J. Wong 1430ea0f843aSMatthew Wilcox (Oracle) folio_lock(folio); 1431ea0f843aSMatthew Wilcox (Oracle) ret = folio_mkwrite_check_truncate(folio, iter.inode); 1432243145bcSAndreas Gruenbacher if (ret < 0) 1433afc51aaaSDarrick J. Wong goto out_unlock; 1434ea0f843aSMatthew Wilcox (Oracle) iter.pos = folio_pos(folio); 1435253564baSChristoph Hellwig iter.len = ret; 1436253564baSChristoph Hellwig while ((ret = iomap_iter(&iter, ops)) > 0) 1437ea0f843aSMatthew Wilcox (Oracle) iter.processed = iomap_folio_mkwrite_iter(&iter, folio); 1438afc51aaaSDarrick J. Wong 1439253564baSChristoph Hellwig if (ret < 0) 1440afc51aaaSDarrick J. Wong goto out_unlock; 1441ea0f843aSMatthew Wilcox (Oracle) folio_wait_stable(folio); 1442afc51aaaSDarrick J. Wong return VM_FAULT_LOCKED; 1443afc51aaaSDarrick J. Wong out_unlock: 1444ea0f843aSMatthew Wilcox (Oracle) folio_unlock(folio); 14452ba39cc4SChristoph Hellwig return vmf_fs_error(ret); 1446afc51aaaSDarrick J. Wong } 1447afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_page_mkwrite); 1448598ecfbaSChristoph Hellwig 14498ffd74e9SMatthew Wilcox (Oracle) static void iomap_finish_folio_write(struct inode *inode, struct folio *folio, 14508ffd74e9SMatthew Wilcox (Oracle) size_t len, int error) 1451598ecfbaSChristoph Hellwig { 145204f52c4eSRitesh Harjani (IBM) struct iomap_folio_state *ifs = folio->private; 1453598ecfbaSChristoph Hellwig 1454598ecfbaSChristoph Hellwig if (error) { 14558ffd74e9SMatthew Wilcox (Oracle) folio_set_error(folio); 1456b69eea82SDarrick J. 
Wong mapping_set_error(inode->i_mapping, error); 1457598ecfbaSChristoph Hellwig } 1458598ecfbaSChristoph Hellwig 145904f52c4eSRitesh Harjani (IBM) WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs); 146004f52c4eSRitesh Harjani (IBM) WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0); 1461598ecfbaSChristoph Hellwig 146204f52c4eSRitesh Harjani (IBM) if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending)) 14638ffd74e9SMatthew Wilcox (Oracle) folio_end_writeback(folio); 1464598ecfbaSChristoph Hellwig } 1465598ecfbaSChristoph Hellwig 1466598ecfbaSChristoph Hellwig /* 1467598ecfbaSChristoph Hellwig * We're now finished for good with this ioend structure. Update the page 1468598ecfbaSChristoph Hellwig * state, release holds on bios, and finally free up memory. Do not use the 1469598ecfbaSChristoph Hellwig * ioend after this. 1470598ecfbaSChristoph Hellwig */ 1471ebb7fb15SDave Chinner static u32 1472598ecfbaSChristoph Hellwig iomap_finish_ioend(struct iomap_ioend *ioend, int error) 1473598ecfbaSChristoph Hellwig { 1474598ecfbaSChristoph Hellwig struct inode *inode = ioend->io_inode; 1475598ecfbaSChristoph Hellwig struct bio *bio = &ioend->io_inline_bio; 1476598ecfbaSChristoph Hellwig struct bio *last = ioend->io_bio, *next; 1477598ecfbaSChristoph Hellwig u64 start = bio->bi_iter.bi_sector; 1478c275779fSZorro Lang loff_t offset = ioend->io_offset; 1479598ecfbaSChristoph Hellwig bool quiet = bio_flagged(bio, BIO_QUIET); 1480ebb7fb15SDave Chinner u32 folio_count = 0; 1481598ecfbaSChristoph Hellwig 1482598ecfbaSChristoph Hellwig for (bio = &ioend->io_inline_bio; bio; bio = next) { 14838ffd74e9SMatthew Wilcox (Oracle) struct folio_iter fi; 1484598ecfbaSChristoph Hellwig 1485598ecfbaSChristoph Hellwig /* 1486598ecfbaSChristoph Hellwig * For the last bio, bi_private points to the ioend, so we 1487598ecfbaSChristoph Hellwig * need to explicitly end the iteration here. 1488598ecfbaSChristoph Hellwig */ 1489598ecfbaSChristoph Hellwig if (bio == last) 1490598ecfbaSChristoph Hellwig next = NULL; 1491598ecfbaSChristoph Hellwig else 1492598ecfbaSChristoph Hellwig next = bio->bi_private; 1493598ecfbaSChristoph Hellwig 14948ffd74e9SMatthew Wilcox (Oracle) /* walk all folios in bio, ending page IO on them */ 1495ebb7fb15SDave Chinner bio_for_each_folio_all(fi, bio) { 14968ffd74e9SMatthew Wilcox (Oracle) iomap_finish_folio_write(inode, fi.folio, fi.length, 14978ffd74e9SMatthew Wilcox (Oracle) error); 1498ebb7fb15SDave Chinner folio_count++; 1499ebb7fb15SDave Chinner } 1500598ecfbaSChristoph Hellwig bio_put(bio); 1501598ecfbaSChristoph Hellwig } 1502c275779fSZorro Lang /* The ioend has been freed by bio_put() */ 1503598ecfbaSChristoph Hellwig 1504598ecfbaSChristoph Hellwig if (unlikely(error && !quiet)) { 1505598ecfbaSChristoph Hellwig printk_ratelimited(KERN_ERR 15069cd0ed63SDarrick J. Wong "%s: writeback error on inode %lu, offset %lld, sector %llu", 1507c275779fSZorro Lang inode->i_sb->s_id, inode->i_ino, offset, start); 1508598ecfbaSChristoph Hellwig } 1509ebb7fb15SDave Chinner return folio_count; 1510598ecfbaSChristoph Hellwig } 1511598ecfbaSChristoph Hellwig 1512ebb7fb15SDave Chinner /* 1513ebb7fb15SDave Chinner * Ioend completion routine for merged bios. This can only be called from task 1514ebb7fb15SDave Chinner * contexts as merged ioends can be of unbound length. Hence we have to break up 1515ebb7fb15SDave Chinner * the writeback completions into manageable chunks to avoid long scheduler 1516ebb7fb15SDave Chinner * holdoffs. 
We aim to keep scheduler holdoffs down below 10ms so that we get 1517ebb7fb15SDave Chinner * good batch processing throughput without creating adverse scheduler latency 1518ebb7fb15SDave Chinner * conditions. 1519ebb7fb15SDave Chinner */ 1520598ecfbaSChristoph Hellwig void 1521598ecfbaSChristoph Hellwig iomap_finish_ioends(struct iomap_ioend *ioend, int error) 1522598ecfbaSChristoph Hellwig { 1523598ecfbaSChristoph Hellwig struct list_head tmp; 1524ebb7fb15SDave Chinner u32 completions; 1525ebb7fb15SDave Chinner 1526ebb7fb15SDave Chinner might_sleep(); 1527598ecfbaSChristoph Hellwig 1528598ecfbaSChristoph Hellwig list_replace_init(&ioend->io_list, &tmp); 1529ebb7fb15SDave Chinner completions = iomap_finish_ioend(ioend, error); 1530598ecfbaSChristoph Hellwig 1531598ecfbaSChristoph Hellwig while (!list_empty(&tmp)) { 1532ebb7fb15SDave Chinner if (completions > IOEND_BATCH_SIZE * 8) { 1533ebb7fb15SDave Chinner cond_resched(); 1534ebb7fb15SDave Chinner completions = 0; 1535ebb7fb15SDave Chinner } 1536598ecfbaSChristoph Hellwig ioend = list_first_entry(&tmp, struct iomap_ioend, io_list); 1537598ecfbaSChristoph Hellwig list_del_init(&ioend->io_list); 1538ebb7fb15SDave Chinner completions += iomap_finish_ioend(ioend, error); 1539598ecfbaSChristoph Hellwig } 1540598ecfbaSChristoph Hellwig } 1541598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_finish_ioends); 1542598ecfbaSChristoph Hellwig 1543598ecfbaSChristoph Hellwig /* 1544598ecfbaSChristoph Hellwig * We can merge two adjacent ioends if they have the same set of work to do. 1545598ecfbaSChristoph Hellwig */ 1546598ecfbaSChristoph Hellwig static bool 1547598ecfbaSChristoph Hellwig iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next) 1548598ecfbaSChristoph Hellwig { 1549598ecfbaSChristoph Hellwig if (ioend->io_bio->bi_status != next->io_bio->bi_status) 1550598ecfbaSChristoph Hellwig return false; 1551598ecfbaSChristoph Hellwig if ((ioend->io_flags & IOMAP_F_SHARED) ^ 1552598ecfbaSChristoph Hellwig (next->io_flags & IOMAP_F_SHARED)) 1553598ecfbaSChristoph Hellwig return false; 1554598ecfbaSChristoph Hellwig if ((ioend->io_type == IOMAP_UNWRITTEN) ^ 1555598ecfbaSChristoph Hellwig (next->io_type == IOMAP_UNWRITTEN)) 1556598ecfbaSChristoph Hellwig return false; 1557598ecfbaSChristoph Hellwig if (ioend->io_offset + ioend->io_size != next->io_offset) 1558598ecfbaSChristoph Hellwig return false; 1559ebb7fb15SDave Chinner /* 1560ebb7fb15SDave Chinner * Do not merge physically discontiguous ioends. The filesystem 1561ebb7fb15SDave Chinner * completion functions will have to iterate the physical 1562ebb7fb15SDave Chinner * discontiguities even if we merge the ioends at a logical level, so 1563ebb7fb15SDave Chinner * we don't gain anything by merging physical discontiguities here. 1564ebb7fb15SDave Chinner * 1565ebb7fb15SDave Chinner * We cannot use bio->bi_iter.bi_sector here as it is modified during 1566ebb7fb15SDave Chinner * submission so does not point to the start sector of the bio at 1567ebb7fb15SDave Chinner * completion. 
1568ebb7fb15SDave Chinner */ 1569ebb7fb15SDave Chinner if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector) 1570ebb7fb15SDave Chinner return false; 1571598ecfbaSChristoph Hellwig return true; 1572598ecfbaSChristoph Hellwig } 1573598ecfbaSChristoph Hellwig 1574598ecfbaSChristoph Hellwig void 15756e552494SBrian Foster iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends) 1576598ecfbaSChristoph Hellwig { 1577598ecfbaSChristoph Hellwig struct iomap_ioend *next; 1578598ecfbaSChristoph Hellwig 1579598ecfbaSChristoph Hellwig INIT_LIST_HEAD(&ioend->io_list); 1580598ecfbaSChristoph Hellwig 1581598ecfbaSChristoph Hellwig while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend, 1582598ecfbaSChristoph Hellwig io_list))) { 1583598ecfbaSChristoph Hellwig if (!iomap_ioend_can_merge(ioend, next)) 1584598ecfbaSChristoph Hellwig break; 1585598ecfbaSChristoph Hellwig list_move_tail(&next->io_list, &ioend->io_list); 1586598ecfbaSChristoph Hellwig ioend->io_size += next->io_size; 1587598ecfbaSChristoph Hellwig } 1588598ecfbaSChristoph Hellwig } 1589598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_ioend_try_merge); 1590598ecfbaSChristoph Hellwig 1591598ecfbaSChristoph Hellwig static int 15924f0f586bSSami Tolvanen iomap_ioend_compare(void *priv, const struct list_head *a, 15934f0f586bSSami Tolvanen const struct list_head *b) 1594598ecfbaSChristoph Hellwig { 1595b3d423ecSChristoph Hellwig struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list); 1596b3d423ecSChristoph Hellwig struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list); 1597598ecfbaSChristoph Hellwig 1598598ecfbaSChristoph Hellwig if (ia->io_offset < ib->io_offset) 1599598ecfbaSChristoph Hellwig return -1; 1600b3d423ecSChristoph Hellwig if (ia->io_offset > ib->io_offset) 1601598ecfbaSChristoph Hellwig return 1; 1602598ecfbaSChristoph Hellwig return 0; 1603598ecfbaSChristoph Hellwig } 1604598ecfbaSChristoph Hellwig 1605598ecfbaSChristoph Hellwig void 1606598ecfbaSChristoph Hellwig iomap_sort_ioends(struct list_head *ioend_list) 1607598ecfbaSChristoph Hellwig { 1608598ecfbaSChristoph Hellwig list_sort(NULL, ioend_list, iomap_ioend_compare); 1609598ecfbaSChristoph Hellwig } 1610598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_sort_ioends); 1611598ecfbaSChristoph Hellwig 1612598ecfbaSChristoph Hellwig static void iomap_writepage_end_bio(struct bio *bio) 1613598ecfbaSChristoph Hellwig { 1614598ecfbaSChristoph Hellwig struct iomap_ioend *ioend = bio->bi_private; 1615598ecfbaSChristoph Hellwig 1616598ecfbaSChristoph Hellwig iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status)); 1617598ecfbaSChristoph Hellwig } 1618598ecfbaSChristoph Hellwig 1619598ecfbaSChristoph Hellwig /* 1620598ecfbaSChristoph Hellwig * Submit the final bio for an ioend. 1621598ecfbaSChristoph Hellwig * 1622598ecfbaSChristoph Hellwig * If @error is non-zero, it means that we have a situation where some part of 1623f1f264b4SAndreas Gruenbacher * the submission process has failed after we've marked pages for writeback 1624598ecfbaSChristoph Hellwig * and unlocked them. In this situation, we need to fail the bio instead of 1625598ecfbaSChristoph Hellwig * submitting it. This typically only happens on a filesystem shutdown. 
1626598ecfbaSChristoph Hellwig */ 1627598ecfbaSChristoph Hellwig static int 1628598ecfbaSChristoph Hellwig iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend, 1629598ecfbaSChristoph Hellwig int error) 1630598ecfbaSChristoph Hellwig { 1631598ecfbaSChristoph Hellwig ioend->io_bio->bi_private = ioend; 1632598ecfbaSChristoph Hellwig ioend->io_bio->bi_end_io = iomap_writepage_end_bio; 1633598ecfbaSChristoph Hellwig 1634598ecfbaSChristoph Hellwig if (wpc->ops->prepare_ioend) 1635598ecfbaSChristoph Hellwig error = wpc->ops->prepare_ioend(ioend, error); 1636598ecfbaSChristoph Hellwig if (error) { 1637598ecfbaSChristoph Hellwig /* 1638f1f264b4SAndreas Gruenbacher * If we're failing the IO now, just mark the ioend with an 1639598ecfbaSChristoph Hellwig * error and finish it. This will run IO completion immediately 1640598ecfbaSChristoph Hellwig * as there is only one reference to the ioend at this point in 1641598ecfbaSChristoph Hellwig * time. 1642598ecfbaSChristoph Hellwig */ 1643598ecfbaSChristoph Hellwig ioend->io_bio->bi_status = errno_to_blk_status(error); 1644598ecfbaSChristoph Hellwig bio_endio(ioend->io_bio); 1645598ecfbaSChristoph Hellwig return error; 1646598ecfbaSChristoph Hellwig } 1647598ecfbaSChristoph Hellwig 1648598ecfbaSChristoph Hellwig submit_bio(ioend->io_bio); 1649598ecfbaSChristoph Hellwig return 0; 1650598ecfbaSChristoph Hellwig } 1651598ecfbaSChristoph Hellwig 1652598ecfbaSChristoph Hellwig static struct iomap_ioend * 1653598ecfbaSChristoph Hellwig iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, 1654598ecfbaSChristoph Hellwig loff_t offset, sector_t sector, struct writeback_control *wbc) 1655598ecfbaSChristoph Hellwig { 1656598ecfbaSChristoph Hellwig struct iomap_ioend *ioend; 1657598ecfbaSChristoph Hellwig struct bio *bio; 1658598ecfbaSChristoph Hellwig 1659609be106SChristoph Hellwig bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS, 1660609be106SChristoph Hellwig REQ_OP_WRITE | wbc_to_write_flags(wbc), 1661609be106SChristoph Hellwig GFP_NOFS, &iomap_ioend_bioset); 1662598ecfbaSChristoph Hellwig bio->bi_iter.bi_sector = sector; 1663598ecfbaSChristoph Hellwig wbc_init_bio(wbc, bio); 1664598ecfbaSChristoph Hellwig 1665598ecfbaSChristoph Hellwig ioend = container_of(bio, struct iomap_ioend, io_inline_bio); 1666598ecfbaSChristoph Hellwig INIT_LIST_HEAD(&ioend->io_list); 1667598ecfbaSChristoph Hellwig ioend->io_type = wpc->iomap.type; 1668598ecfbaSChristoph Hellwig ioend->io_flags = wpc->iomap.flags; 1669598ecfbaSChristoph Hellwig ioend->io_inode = inode; 1670598ecfbaSChristoph Hellwig ioend->io_size = 0; 1671ebb7fb15SDave Chinner ioend->io_folios = 0; 1672598ecfbaSChristoph Hellwig ioend->io_offset = offset; 1673598ecfbaSChristoph Hellwig ioend->io_bio = bio; 1674ebb7fb15SDave Chinner ioend->io_sector = sector; 1675598ecfbaSChristoph Hellwig return ioend; 1676598ecfbaSChristoph Hellwig } 1677598ecfbaSChristoph Hellwig 1678598ecfbaSChristoph Hellwig /* 1679598ecfbaSChristoph Hellwig * Allocate a new bio, and chain the old bio to the new one. 1680598ecfbaSChristoph Hellwig * 1681f1f264b4SAndreas Gruenbacher * Note that we have to perform the chaining in this unintuitive order 1682598ecfbaSChristoph Hellwig * so that the bi_private linkage is set up in the right direction for the 1683598ecfbaSChristoph Hellwig * traversal in iomap_finish_ioend(). 
1684598ecfbaSChristoph Hellwig */ 1685598ecfbaSChristoph Hellwig static struct bio * 1686598ecfbaSChristoph Hellwig iomap_chain_bio(struct bio *prev) 1687598ecfbaSChristoph Hellwig { 1688598ecfbaSChristoph Hellwig struct bio *new; 1689598ecfbaSChristoph Hellwig 169007888c66SChristoph Hellwig new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS); 169107888c66SChristoph Hellwig bio_clone_blkg_association(new, prev); 1692598ecfbaSChristoph Hellwig new->bi_iter.bi_sector = bio_end_sector(prev); 1693598ecfbaSChristoph Hellwig 1694598ecfbaSChristoph Hellwig bio_chain(prev, new); 1695598ecfbaSChristoph Hellwig bio_get(prev); /* for iomap_finish_ioend */ 1696598ecfbaSChristoph Hellwig submit_bio(prev); 1697598ecfbaSChristoph Hellwig return new; 1698598ecfbaSChristoph Hellwig } 1699598ecfbaSChristoph Hellwig 1700598ecfbaSChristoph Hellwig static bool 1701598ecfbaSChristoph Hellwig iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset, 1702598ecfbaSChristoph Hellwig sector_t sector) 1703598ecfbaSChristoph Hellwig { 1704598ecfbaSChristoph Hellwig if ((wpc->iomap.flags & IOMAP_F_SHARED) != 1705598ecfbaSChristoph Hellwig (wpc->ioend->io_flags & IOMAP_F_SHARED)) 1706598ecfbaSChristoph Hellwig return false; 1707598ecfbaSChristoph Hellwig if (wpc->iomap.type != wpc->ioend->io_type) 1708598ecfbaSChristoph Hellwig return false; 1709598ecfbaSChristoph Hellwig if (offset != wpc->ioend->io_offset + wpc->ioend->io_size) 1710598ecfbaSChristoph Hellwig return false; 1711598ecfbaSChristoph Hellwig if (sector != bio_end_sector(wpc->ioend->io_bio)) 1712598ecfbaSChristoph Hellwig return false; 1713ebb7fb15SDave Chinner /* 1714ebb7fb15SDave Chinner * Limit ioend bio chain lengths to minimise IO completion latency. This 1715ebb7fb15SDave Chinner * also prevents long tight loops ending page writeback on all the 1716ebb7fb15SDave Chinner * folios in the ioend. 1717ebb7fb15SDave Chinner */ 1718ebb7fb15SDave Chinner if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE) 1719ebb7fb15SDave Chinner return false; 1720598ecfbaSChristoph Hellwig return true; 1721598ecfbaSChristoph Hellwig } 1722598ecfbaSChristoph Hellwig 1723598ecfbaSChristoph Hellwig /* 1724598ecfbaSChristoph Hellwig * Test to see if we have an existing ioend structure that we could append to 1725f1f264b4SAndreas Gruenbacher * first; otherwise finish off the current ioend and start another. 
1726598ecfbaSChristoph Hellwig */ 1727598ecfbaSChristoph Hellwig static void 1728e735c007SMatthew Wilcox (Oracle) iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio, 172904f52c4eSRitesh Harjani (IBM) struct iomap_folio_state *ifs, struct iomap_writepage_ctx *wpc, 1730598ecfbaSChristoph Hellwig struct writeback_control *wbc, struct list_head *iolist) 1731598ecfbaSChristoph Hellwig { 1732e735c007SMatthew Wilcox (Oracle) sector_t sector = iomap_sector(&wpc->iomap, pos); 1733598ecfbaSChristoph Hellwig unsigned len = i_blocksize(inode); 1734e735c007SMatthew Wilcox (Oracle) size_t poff = offset_in_folio(folio, pos); 1735598ecfbaSChristoph Hellwig 1736e735c007SMatthew Wilcox (Oracle) if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) { 1737598ecfbaSChristoph Hellwig if (wpc->ioend) 1738598ecfbaSChristoph Hellwig list_add(&wpc->ioend->io_list, iolist); 1739e735c007SMatthew Wilcox (Oracle) wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc); 1740598ecfbaSChristoph Hellwig } 1741598ecfbaSChristoph Hellwig 1742e735c007SMatthew Wilcox (Oracle) if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) { 1743c1b79f11SChristoph Hellwig wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio); 1744c2478469SJohannes Thumshirn bio_add_folio_nofail(wpc->ioend->io_bio, folio, len, poff); 1745c1b79f11SChristoph Hellwig } 1746c1b79f11SChristoph Hellwig 174704f52c4eSRitesh Harjani (IBM) if (ifs) 174804f52c4eSRitesh Harjani (IBM) atomic_add(len, &ifs->write_bytes_pending); 1749598ecfbaSChristoph Hellwig wpc->ioend->io_size += len; 1750e735c007SMatthew Wilcox (Oracle) wbc_account_cgroup_owner(wbc, &folio->page, len); 1751598ecfbaSChristoph Hellwig } 1752598ecfbaSChristoph Hellwig 1753598ecfbaSChristoph Hellwig /* 1754598ecfbaSChristoph Hellwig * We implement an immediate ioend submission policy here to avoid needing to 1755598ecfbaSChristoph Hellwig * chain multiple ioends and hence nest mempool allocations which can violate 1756f1f264b4SAndreas Gruenbacher * the forward progress guarantees we need to provide. The current ioend we're 1757f1f264b4SAndreas Gruenbacher * adding blocks to is cached in the writepage context, and if the new block 1758f1f264b4SAndreas Gruenbacher * doesn't append to the cached ioend, it will create a new ioend and cache that 1759598ecfbaSChristoph Hellwig * instead. 1760598ecfbaSChristoph Hellwig * 1761598ecfbaSChristoph Hellwig * If a new ioend is created and cached, the old ioend is returned and queued 1762598ecfbaSChristoph Hellwig * locally for submission once the entire page is processed or an error has been 1763598ecfbaSChristoph Hellwig * detected. While ioends are submitted immediately after they are completed, 1764598ecfbaSChristoph Hellwig * batching optimisations are provided by higher level block plugging. 1765598ecfbaSChristoph Hellwig * 1766598ecfbaSChristoph Hellwig * At the end of a writeback pass, there will be a cached ioend remaining on the 1767598ecfbaSChristoph Hellwig * writepage context that the caller will need to submit. 
1768598ecfbaSChristoph Hellwig */ 1769598ecfbaSChristoph Hellwig static int 1770598ecfbaSChristoph Hellwig iomap_writepage_map(struct iomap_writepage_ctx *wpc, 1771598ecfbaSChristoph Hellwig struct writeback_control *wbc, struct inode *inode, 1772e735c007SMatthew Wilcox (Oracle) struct folio *folio, u64 end_pos) 1773598ecfbaSChristoph Hellwig { 17744ce02c67SRitesh Harjani (IBM) struct iomap_folio_state *ifs = folio->private; 1775598ecfbaSChristoph Hellwig struct iomap_ioend *ioend, *next; 1776598ecfbaSChristoph Hellwig unsigned len = i_blocksize(inode); 177792655036SMatthew Wilcox (Oracle) unsigned nblocks = i_blocks_per_folio(inode, folio); 177892655036SMatthew Wilcox (Oracle) u64 pos = folio_pos(folio); 1779598ecfbaSChristoph Hellwig int error = 0, count = 0, i; 1780598ecfbaSChristoph Hellwig LIST_HEAD(submit_list); 1781598ecfbaSChristoph Hellwig 17824ce02c67SRitesh Harjani (IBM) WARN_ON_ONCE(end_pos <= pos); 17834ce02c67SRitesh Harjani (IBM) 17844ce02c67SRitesh Harjani (IBM) if (!ifs && nblocks > 1) { 17854ce02c67SRitesh Harjani (IBM) ifs = ifs_alloc(inode, folio, 0); 17864ce02c67SRitesh Harjani (IBM) iomap_set_range_dirty(folio, 0, end_pos - pos); 17874ce02c67SRitesh Harjani (IBM) } 17884ce02c67SRitesh Harjani (IBM) 178904f52c4eSRitesh Harjani (IBM) WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) != 0); 1790598ecfbaSChristoph Hellwig 1791598ecfbaSChristoph Hellwig /* 179292655036SMatthew Wilcox (Oracle) * Walk through the folio to find areas to write back. If we 179392655036SMatthew Wilcox (Oracle) * run off the end of the current map or find the current map 179492655036SMatthew Wilcox (Oracle) * invalid, grab a new one. 1795598ecfbaSChristoph Hellwig */ 179692655036SMatthew Wilcox (Oracle) for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) { 17974ce02c67SRitesh Harjani (IBM) if (ifs && !ifs_block_is_dirty(folio, ifs, i)) 1798598ecfbaSChristoph Hellwig continue; 1799598ecfbaSChristoph Hellwig 180092655036SMatthew Wilcox (Oracle) error = wpc->ops->map_blocks(wpc, inode, pos); 1801598ecfbaSChristoph Hellwig if (error) 1802598ecfbaSChristoph Hellwig break; 1803adc9c2e5SDarrick J. Wong trace_iomap_writepage_map(inode, &wpc->iomap); 18043e19e6f3SChristoph Hellwig if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE)) 18053e19e6f3SChristoph Hellwig continue; 1806598ecfbaSChristoph Hellwig if (wpc->iomap.type == IOMAP_HOLE) 1807598ecfbaSChristoph Hellwig continue; 180804f52c4eSRitesh Harjani (IBM) iomap_add_to_ioend(inode, pos, folio, ifs, wpc, wbc, 1809598ecfbaSChristoph Hellwig &submit_list); 1810598ecfbaSChristoph Hellwig count++; 1811598ecfbaSChristoph Hellwig } 1812ebb7fb15SDave Chinner if (count) 1813ebb7fb15SDave Chinner wpc->ioend->io_folios++; 1814598ecfbaSChristoph Hellwig 1815598ecfbaSChristoph Hellwig WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list)); 1816e735c007SMatthew Wilcox (Oracle) WARN_ON_ONCE(!folio_test_locked(folio)); 1817e735c007SMatthew Wilcox (Oracle) WARN_ON_ONCE(folio_test_writeback(folio)); 1818e735c007SMatthew Wilcox (Oracle) WARN_ON_ONCE(folio_test_dirty(folio)); 1819598ecfbaSChristoph Hellwig 1820598ecfbaSChristoph Hellwig /* 1821598ecfbaSChristoph Hellwig * We cannot cancel the ioend directly here on error. We may have 1822598ecfbaSChristoph Hellwig * already set other pages under writeback and hence we have to run I/O 1823598ecfbaSChristoph Hellwig * completion to mark the error state of the pages under writeback 1824598ecfbaSChristoph Hellwig * appropriately. 
1825598ecfbaSChristoph Hellwig */ 1826598ecfbaSChristoph Hellwig if (unlikely(error)) { 1827598ecfbaSChristoph Hellwig /* 1828763e4cdcSBrian Foster * Let the filesystem know what portion of the current page 1829f1f264b4SAndreas Gruenbacher * failed to map. If the page hasn't been added to ioend, it 1830763e4cdcSBrian Foster * won't be affected by I/O completion and we must unlock it 1831763e4cdcSBrian Foster * now. 1832598ecfbaSChristoph Hellwig */ 18336e478521SMatthew Wilcox (Oracle) if (wpc->ops->discard_folio) 183492655036SMatthew Wilcox (Oracle) wpc->ops->discard_folio(folio, pos); 1835763e4cdcSBrian Foster if (!count) { 1836e735c007SMatthew Wilcox (Oracle) folio_unlock(folio); 1837598ecfbaSChristoph Hellwig goto done; 1838598ecfbaSChristoph Hellwig } 1839598ecfbaSChristoph Hellwig } 1840598ecfbaSChristoph Hellwig 18414ce02c67SRitesh Harjani (IBM) /* 18424ce02c67SRitesh Harjani (IBM) * We can have dirty bits set past end of file in page_mkwrite path 18434ce02c67SRitesh Harjani (IBM) * while mapping the last partial folio. Hence it's better to clear 18444ce02c67SRitesh Harjani (IBM) * all the dirty bits in the folio here. 18454ce02c67SRitesh Harjani (IBM) */ 18464ce02c67SRitesh Harjani (IBM) iomap_clear_range_dirty(folio, 0, folio_size(folio)); 1847e735c007SMatthew Wilcox (Oracle) folio_start_writeback(folio); 1848e735c007SMatthew Wilcox (Oracle) folio_unlock(folio); 1849598ecfbaSChristoph Hellwig 1850598ecfbaSChristoph Hellwig /* 1851f1f264b4SAndreas Gruenbacher * Preserve the original error if there was one; catch 1852598ecfbaSChristoph Hellwig * submission errors here and propagate into subsequent ioend 1853598ecfbaSChristoph Hellwig * submissions. 1854598ecfbaSChristoph Hellwig */ 1855598ecfbaSChristoph Hellwig list_for_each_entry_safe(ioend, next, &submit_list, io_list) { 1856598ecfbaSChristoph Hellwig int error2; 1857598ecfbaSChristoph Hellwig 1858598ecfbaSChristoph Hellwig list_del_init(&ioend->io_list); 1859598ecfbaSChristoph Hellwig error2 = iomap_submit_ioend(wpc, ioend, error); 1860598ecfbaSChristoph Hellwig if (error2 && !error) 1861598ecfbaSChristoph Hellwig error = error2; 1862598ecfbaSChristoph Hellwig } 1863598ecfbaSChristoph Hellwig 1864598ecfbaSChristoph Hellwig /* 1865598ecfbaSChristoph Hellwig * We can end up here with no error and nothing to write only if we race 1866598ecfbaSChristoph Hellwig * with a partial page truncate on a sub-page block sized filesystem. 1867598ecfbaSChristoph Hellwig */ 1868598ecfbaSChristoph Hellwig if (!count) 1869e735c007SMatthew Wilcox (Oracle) folio_end_writeback(folio); 1870598ecfbaSChristoph Hellwig done: 18713d5f3ba1SDarrick J. Wong mapping_set_error(inode->i_mapping, error); 1872598ecfbaSChristoph Hellwig return error; 1873598ecfbaSChristoph Hellwig } 1874598ecfbaSChristoph Hellwig 1875598ecfbaSChristoph Hellwig /* 1876598ecfbaSChristoph Hellwig * Write out a dirty page. 1877598ecfbaSChristoph Hellwig * 1878f1f264b4SAndreas Gruenbacher * For delalloc space on the page, we need to allocate space and flush it. 1879f1f264b4SAndreas Gruenbacher * For unwritten space on the page, we need to start the conversion to 1880598ecfbaSChristoph Hellwig * regular allocated space. 
1881598ecfbaSChristoph Hellwig */ 1882d585bdbeSMatthew Wilcox (Oracle) static int iomap_do_writepage(struct folio *folio, 1883d585bdbeSMatthew Wilcox (Oracle) struct writeback_control *wbc, void *data) 1884598ecfbaSChristoph Hellwig { 1885598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc = data; 1886e735c007SMatthew Wilcox (Oracle) struct inode *inode = folio->mapping->host; 188781d4782aSMatthew Wilcox (Oracle) u64 end_pos, isize; 1888598ecfbaSChristoph Hellwig 1889e735c007SMatthew Wilcox (Oracle) trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio)); 1890598ecfbaSChristoph Hellwig 1891598ecfbaSChristoph Hellwig /* 1892e735c007SMatthew Wilcox (Oracle) * Refuse to write the folio out if we're called from reclaim context. 1893598ecfbaSChristoph Hellwig * 1894598ecfbaSChristoph Hellwig * This avoids stack overflows when called from deeply used stacks in 1895598ecfbaSChristoph Hellwig * random callers for direct reclaim or memcg reclaim. We explicitly 1896598ecfbaSChristoph Hellwig * allow reclaim from kswapd as the stack usage there is relatively low. 1897598ecfbaSChristoph Hellwig * 1898598ecfbaSChristoph Hellwig * This should never happen except in the case of a VM regression so 1899598ecfbaSChristoph Hellwig * warn about it. 1900598ecfbaSChristoph Hellwig */ 1901598ecfbaSChristoph Hellwig if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == 1902598ecfbaSChristoph Hellwig PF_MEMALLOC)) 1903598ecfbaSChristoph Hellwig goto redirty; 1904598ecfbaSChristoph Hellwig 1905598ecfbaSChristoph Hellwig /* 1906e735c007SMatthew Wilcox (Oracle) * Is this folio beyond the end of the file? 1907598ecfbaSChristoph Hellwig * 1908e735c007SMatthew Wilcox (Oracle) * The folio index is less than the end_index, adjust the end_pos 1909e735c007SMatthew Wilcox (Oracle) * to the highest offset that this folio should represent. 1910598ecfbaSChristoph Hellwig * ----------------------------------------------------- 1911598ecfbaSChristoph Hellwig * | file mapping | <EOF> | 1912598ecfbaSChristoph Hellwig * ----------------------------------------------------- 1913598ecfbaSChristoph Hellwig * | Page ... | Page N-2 | Page N-1 | Page N | | 1914598ecfbaSChristoph Hellwig * ^--------------------------------^----------|-------- 1915598ecfbaSChristoph Hellwig * | desired writeback range | see else | 1916598ecfbaSChristoph Hellwig * ---------------------------------^------------------| 1917598ecfbaSChristoph Hellwig */ 191881d4782aSMatthew Wilcox (Oracle) isize = i_size_read(inode); 1919e735c007SMatthew Wilcox (Oracle) end_pos = folio_pos(folio) + folio_size(folio); 192081d4782aSMatthew Wilcox (Oracle) if (end_pos > isize) { 1921598ecfbaSChristoph Hellwig /* 1922598ecfbaSChristoph Hellwig * Check whether the page to write out is beyond or straddles 1923598ecfbaSChristoph Hellwig * i_size or not. 1924598ecfbaSChristoph Hellwig * ------------------------------------------------------- 1925598ecfbaSChristoph Hellwig * | file mapping | <EOF> | 1926598ecfbaSChristoph Hellwig * ------------------------------------------------------- 1927598ecfbaSChristoph Hellwig * | Page ... 
| Page N-2 | Page N-1 | Page N | Beyond | 1928598ecfbaSChristoph Hellwig * ^--------------------------------^-----------|--------- 1929598ecfbaSChristoph Hellwig * | | Straddles | 1930598ecfbaSChristoph Hellwig * ---------------------------------^-----------|--------| 1931598ecfbaSChristoph Hellwig */ 1932e735c007SMatthew Wilcox (Oracle) size_t poff = offset_in_folio(folio, isize); 193381d4782aSMatthew Wilcox (Oracle) pgoff_t end_index = isize >> PAGE_SHIFT; 1934598ecfbaSChristoph Hellwig 1935598ecfbaSChristoph Hellwig /* 1936d58562caSChris Mason * Skip the page if it's fully outside i_size, e.g. 1937d58562caSChris Mason * due to a truncate operation that's in progress. We've 1938d58562caSChris Mason * cleaned this page and truncate will finish things off for 1939d58562caSChris Mason * us. 1940598ecfbaSChristoph Hellwig * 1941f1f264b4SAndreas Gruenbacher * Note that the end_index is unsigned long. If the given 1942f1f264b4SAndreas Gruenbacher * offset is greater than 16TB on a 32-bit system then if we 1943f1f264b4SAndreas Gruenbacher * checked if the page is fully outside i_size with 1944f1f264b4SAndreas Gruenbacher * "if (page->index >= end_index + 1)", "end_index + 1" would 1945f1f264b4SAndreas Gruenbacher * overflow and evaluate to 0. Hence this page would be 1946f1f264b4SAndreas Gruenbacher * redirtied and written out repeatedly, which would result in 1947f1f264b4SAndreas Gruenbacher * an infinite loop; the user program performing this operation 1948f1f264b4SAndreas Gruenbacher * would hang. Instead, we can detect this situation by 1949f1f264b4SAndreas Gruenbacher * checking if the page is totally beyond i_size or if its 1950598ecfbaSChristoph Hellwig * offset is just equal to the EOF. 1951598ecfbaSChristoph Hellwig */ 1952e735c007SMatthew Wilcox (Oracle) if (folio->index > end_index || 1953e735c007SMatthew Wilcox (Oracle) (folio->index == end_index && poff == 0)) 1954d58562caSChris Mason goto unlock; 1955598ecfbaSChristoph Hellwig 1956598ecfbaSChristoph Hellwig /* 1957598ecfbaSChristoph Hellwig * The page straddles i_size. It must be zeroed out on each 1958598ecfbaSChristoph Hellwig * and every writepage invocation because it may be mmapped. 1959598ecfbaSChristoph Hellwig * "A file is mapped in multiples of the page size. For a file 1960598ecfbaSChristoph Hellwig * that is not a multiple of the page size, the remaining 1961598ecfbaSChristoph Hellwig * memory is zeroed when mapped, and writes to that region are 1962598ecfbaSChristoph Hellwig * not written out to the file." 
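 *
 * (Hence the folio_zero_segment() call below wipes the folio from the EOF
 * offset to the end of the folio before the writeback I/O is built.)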
1963598ecfbaSChristoph Hellwig */ 1964e735c007SMatthew Wilcox (Oracle) folio_zero_segment(folio, poff, folio_size(folio)); 196581d4782aSMatthew Wilcox (Oracle) end_pos = isize; 1966598ecfbaSChristoph Hellwig } 1967598ecfbaSChristoph Hellwig 1968e735c007SMatthew Wilcox (Oracle) return iomap_writepage_map(wpc, wbc, inode, folio, end_pos); 1969598ecfbaSChristoph Hellwig 1970598ecfbaSChristoph Hellwig redirty: 1971e735c007SMatthew Wilcox (Oracle) folio_redirty_for_writepage(wbc, folio); 1972d58562caSChris Mason unlock: 1973e735c007SMatthew Wilcox (Oracle) folio_unlock(folio); 1974598ecfbaSChristoph Hellwig return 0; 1975598ecfbaSChristoph Hellwig } 1976598ecfbaSChristoph Hellwig 1977598ecfbaSChristoph Hellwig int 1978598ecfbaSChristoph Hellwig iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, 1979598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc, 1980598ecfbaSChristoph Hellwig const struct iomap_writeback_ops *ops) 1981598ecfbaSChristoph Hellwig { 1982598ecfbaSChristoph Hellwig int ret; 1983598ecfbaSChristoph Hellwig 1984598ecfbaSChristoph Hellwig wpc->ops = ops; 1985598ecfbaSChristoph Hellwig ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc); 1986598ecfbaSChristoph Hellwig if (!wpc->ioend) 1987598ecfbaSChristoph Hellwig return ret; 1988598ecfbaSChristoph Hellwig return iomap_submit_ioend(wpc, wpc->ioend, ret); 1989598ecfbaSChristoph Hellwig } 1990598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_writepages); 1991598ecfbaSChristoph Hellwig 1992598ecfbaSChristoph Hellwig static int __init iomap_init(void) 1993598ecfbaSChristoph Hellwig { 1994598ecfbaSChristoph Hellwig return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE), 1995598ecfbaSChristoph Hellwig offsetof(struct iomap_ioend, io_inline_bio), 1996598ecfbaSChristoph Hellwig BIOSET_NEED_BVECS); 1997598ecfbaSChristoph Hellwig } 1998598ecfbaSChristoph Hellwig fs_initcall(iomap_init); 1999