// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

#define IOEND_BATCH_SIZE	4096

typedef int (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length);
/*
 * Structure allocated for each folio to track per-block uptodate, dirty state
 * and I/O completions.
 */
struct iomap_folio_state {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		state_lock;

	/*
	 * Each block has two bits in this bitmap:
	 * Bits [0..blocks_per_folio) have the uptodate status.
	 * Bits [b_p_f...(2*b_p_f)) have the dirty status.
	 */
	unsigned long		state[];
};

static struct bio_set iomap_ioend_bioset;

static inline bool ifs_is_fully_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs)
{
	struct inode *inode = folio->mapping->host;

	return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
}

static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
		unsigned int block)
{
	return test_bit(block, ifs->state);
}

static void ifs_set_range_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int first_blk = off >> inode->i_blkbits;
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_set(ifs->state, first_blk, nr_blks);
	if (ifs_is_fully_uptodate(folio, ifs))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_set_range_uptodate(struct folio *folio, size_t off,
		size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_set_range_uptodate(folio, ifs, off, len);
	else
		folio_mark_uptodate(folio);
}

static inline bool ifs_block_is_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, int block)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);

	return test_bit(block + blks_per_folio, ifs->state);
}

static void ifs_clear_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_clear_range_dirty(folio, ifs, off, len);
}

static void ifs_set_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_set_range_dirty(folio, ifs, off, len);
}

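/*
 * Lazily allocate the per-folio state.  Returns the existing state if one is
 * already attached, and NULL when the folio covers a single filesystem block
 * (the folio flags are sufficient) or when a NOWAIT allocation fails.
 */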
static struct iomap_folio_state *ifs_alloc(struct inode *inode,
		struct folio *folio, unsigned int flags)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
	gfp_t gfp;

	if (ifs || nr_blocks <= 1)
		return ifs;

	if (flags & IOMAP_NOWAIT)
		gfp = GFP_NOWAIT;
	else
		gfp = GFP_NOFS | __GFP_NOFAIL;

	/*
	 * ifs->state tracks two sets of state flags when the
	 * filesystem block size is smaller than the folio size.
	 * The first state tracks per-block uptodate and the
	 * second tracks per-block dirty state.
	 */
	ifs = kzalloc(struct_size(ifs, state,
		      BITS_TO_LONGS(2 * nr_blocks)), gfp);
	if (!ifs)
		return ifs;

	spin_lock_init(&ifs->state_lock);
	if (folio_test_uptodate(folio))
		bitmap_set(ifs->state, 0, nr_blocks);
	if (folio_test_dirty(folio))
		bitmap_set(ifs->state, nr_blocks, nr_blocks);
	folio_attach_private(folio, ifs);

	return ifs;
}

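/*
 * Tear down the per-folio state.  I/O still pending at this point, or a
 * per-block uptodate bitmap that disagrees with the folio flag, is a bug.
 */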
static void ifs_free(struct folio *folio)
{
	struct iomap_folio_state *ifs = folio_detach_private(folio);

	if (!ifs)
		return;
	WARN_ON_ONCE(atomic_read(&ifs->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
	WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
			folio_test_uptodate(folio));
	kfree(ifs);
}

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_folio_state *ifs = folio->private;
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check
	 * the per-block uptodate status and adjust the offset and length if
	 * needed to avoid reading in already uptodate ranges.
	 */
	if (ifs) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!ifs_block_is_uptodate(ifs, i))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (ifs_block_is_uptodate(ifs, i)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

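/*
 * Read completion for one folio: mark the range uptodate (or flag the error)
 * and unlock the folio once no more read bytes are pending against it.
 */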
static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct iomap_folio_state *ifs = folio->private;

	if (unlikely(error)) {
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		iomap_set_range_uptodate(folio, offset, len);
	}

	if (!ifs || atomic_sub_and_test(len, &ifs->read_bytes_pending))
		folio_unlock(folio);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	size_t offset = offset_in_folio(folio, iomap->offset);
	void *addr;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		ifs_alloc(iter->inode, folio, iter->flags);

	addr = kmap_local_folio(folio, offset);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(folio, offset, PAGE_SIZE - poff);
	return 0;
}

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

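/*
 * Read in the blocks of one iomap extent that overlap ctx->cur_folio,
 * starting @offset bytes into the iteration.  Ranges that are already
 * uptodate or need zeroing are handled in place; the rest are batched into
 * ctx->bio.  Returns the number of bytes processed.
 */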
static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct folio *folio = ctx->cur_folio;
	struct iomap_folio_state *ifs;
	loff_t orig_pos = pos;
	size_t poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, folio);

	/* zero post-eof blocks as the page may be mapped */
	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (ifs)
		atomic_add(plen, &ifs->read_bytes_pending);

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
				     REQ_OP_READ, gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads. This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!ctx->bio) {
			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
					     orig_gfp);
		}
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio_nofail(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		folio_set_error(folio);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_folio, we always
	 * return 0 and just set the folio error flag on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_read_folio);
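
/*
 * Sketch of how a filesystem typically wires this up ("myfs" names are
 * hypothetical; xfs does the same thing with its own iomap_ops): the
 * ->read_folio address_space operation simply forwards here:
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return iomap_read_folio(folio, &myfs_iomap_ops);
 *	}
 */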

static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);
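
/*
 * The matching ->readahead implementation is equally thin (hypothetical
 * "myfs" names again):
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		iomap_readahead(rac, &myfs_iomap_ops);
 *	}
 */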

/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	unsigned first, last, i;

	if (!ifs)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	for (i = first; i <= last; i++)
		if (!ifs_block_is_uptodate(ifs, i))
			return false;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

/**
 * iomap_get_folio - get a folio reference for writing
 * @iter: iteration structure
 * @pos: start offset of write
 * @len: Suggested size of folio to create.
 *
 * Returns a locked reference to the folio at @pos, or an error pointer if the
 * folio could not be obtained.
 */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
{
	fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;

	if (iter->flags & IOMAP_NOWAIT)
		fgp |= FGP_NOWAIT;
	fgp |= fgf_set_order(len);

	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
}
EXPORT_SYMBOL_GPL(iomap_get_folio);

bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * If the folio is dirty, we refuse to release our metadata because
	 * it may be partially dirty.  Once we track per-block dirty state,
	 * we can release the metadata if every block is dirty.
	 */
	if (folio_test_dirty(folio))
		return false;
	ifs_free(folio);
	return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
					folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		ifs_free(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	size_t len = folio_size(folio);

	ifs_alloc(inode, folio, 0);
	iomap_set_range_dirty(folio, 0, len);
	return filemap_dirty_folio(mapping, folio);
}
EXPORT_SYMBOL_GPL(iomap_dirty_folio);

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
					 pos + len - 1);
}

static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_add_folio_nofail(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}
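
/*
 * Prepare the blocks of @folio touched by a write at [pos, pos + len) so
 * that a subsequent short copy cannot expose stale data: blocks the write
 * does not fully overwrite are either zeroed (for holes and new extents) or
 * read in synchronously from the source mapping.
 */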
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_folio_state *ifs;
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	/*
	 * If the write or zeroing completely overlaps the current folio, then
	 * entire folio will be dirtied so there is no need for
	 * per-block state tracking structures to be attached to this folio.
	 * For the unshare case, we must read in the ondisk contents because we
	 * are not changing pagecache contents.
	 */
	if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
	    pos + len >= folio_pos(folio) + folio_size(folio))
		return 0;

	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
		return -EAGAIN;

	if (folio_test_uptodate(folio))
		return 0;
	folio_clear_error(folio);

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status;

			if (iter->flags & IOMAP_NOWAIT)
				return -EAGAIN;

			status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
		size_t len)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->get_folio)
		return folio_ops->get_folio(iter, pos, len);
	else
		return iomap_get_folio(iter, pos, len);
}

static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
		struct folio *folio)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->put_folio) {
		folio_ops->put_folio(iter->inode, pos, ret, folio);
	} else {
		folio_unlock(folio);
		folio_put(folio);
	}
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

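/*
 * Get a locked folio for the write at @pos and prepare the blocks it covers.
 * The cached iomap is revalidated first (via ->iomap_valid, if provided) and
 * IOMAP_F_STALE is set instead of touching folio contents when the mapping
 * has been invalidated by concurrent I/O.  On failure, any posteof pagecache
 * instantiated for this write is truncated away again.
 */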
static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	folio = __iomap_get_folio(iter, pos, len);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/*
	 * Now we have a locked folio, before we do anything with it we need to
	 * check that the iomap we have cached is not stale. The inode extent
	 * mapping can change due to concurrent IO in flight (e.g.
	 * IOMAP_UNWRITTEN state can change and memory reclaim could have
	 * reclaimed a previously partially written page at this index after IO
	 * completion before this write reaches this file offset) and hence we
	 * could do the wrong thing here (zero a page range incorrectly or fail
	 * to zero) and corrupt data.
	 */
	if (folio_ops && folio_ops->iomap_valid) {
		bool iomap_valid = folio_ops->iomap_valid(iter->inode,
							 &iter->iomap);
		if (!iomap_valid) {
			iter->iomap.flags |= IOMAP_F_STALE;
			status = 0;
			goto out_unlock;
		}
	}

	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	__iomap_put_folio(iter, pos, 0, folio);
	iomap_write_failed(iter->inode, pos, len);

	return status;
}

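/*
 * Commit a buffered copy into the pagecache: the copied range becomes
 * uptodate and dirty.  A short copy into a non-uptodate folio is rejected
 * entirely (returns 0) so the caller can redo the write from scratch.
 */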
static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return 0;
	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
	iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
	filemap_dirty_folio(inode->i_mapping, folio);
	return copied;
}

static size_t iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, folio, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, &folio->page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	__iomap_put_folio(iter, pos, ret, folio);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (ret < len)
		iomap_write_failed(iter->inode, pos + ret, len - ret);
	return ret;
}

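/*
 * Copy data from @i into the pagecache for the current iomap extent, one
 * folio-sized (or smaller) chunk at a time.  The chunk size is halved, and
 * the copy retried, whenever a short copy into a non-uptodate folio forces
 * iomap_write_end() to reject the write.
 */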
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;
	struct address_space *mapping = iter->inode->i_mapping;
	size_t chunk = mapping_max_folio_size(mapping);
	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;

	do {
		struct folio *folio;
		size_t offset;		/* Offset into folio */
		size_t bytes;		/* Bytes to write to folio */
		size_t copied;		/* Bytes copied from user */

		bytes = iov_iter_count(i);
retry:
		offset = pos & (chunk - 1);
		bytes = min(chunk - offset, bytes);
		status = balance_dirty_pages_ratelimited_flags(mapping,
							       bdp_flags);
		if (unlikely(status))
			break;

		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * For async buffered writes the assumption is that the user
		 * page has already been faulted in. This can be optimized by
		 * faulting the user page.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			break;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		status = iomap_write_end(iter, pos, bytes, copied, folio);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (chunk > PAGE_SIZE)
				chunk /= 2;
			if (copied) {
				bytes = copied;
				goto retry;
			}
		} else {
			pos += status;
			written += status;
			length -= status;
		}
	} while (iov_iter_count(i) && length);

	if (status == -EAGAIN) {
		iov_iter_revert(i, written);
		return -EAGAIN;
	}
	return written ? written : status;
}

958afc51aaaSDarrick J. Wong ssize_t
959ce83a025SChristoph Hellwig iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
960afc51aaaSDarrick J. Wong const struct iomap_ops *ops)
961afc51aaaSDarrick J. Wong {
962ce83a025SChristoph Hellwig struct iomap_iter iter = {
963ce83a025SChristoph Hellwig .inode = iocb->ki_filp->f_mapping->host,
964ce83a025SChristoph Hellwig .pos = iocb->ki_pos,
965ce83a025SChristoph Hellwig .len = iov_iter_count(i),
966ce83a025SChristoph Hellwig .flags = IOMAP_WRITE,
967ce83a025SChristoph Hellwig };
968219580eeSChristoph Hellwig ssize_t ret;
969afc51aaaSDarrick J. Wong
970cae2de69SStefan Roesch if (iocb->ki_flags & IOCB_NOWAIT)
971cae2de69SStefan Roesch iter.flags |= IOMAP_NOWAIT;
972cae2de69SStefan Roesch
973ce83a025SChristoph Hellwig while ((ret = iomap_iter(&iter, ops)) > 0)
974ce83a025SChristoph Hellwig iter.processed = iomap_write_iter(&iter, i);
975219580eeSChristoph Hellwig
97620c64ec8SChristoph Hellwig if (unlikely(iter.pos == iocb->ki_pos))
977ce83a025SChristoph Hellwig return ret;
978219580eeSChristoph Hellwig ret = iter.pos - iocb->ki_pos;
979efa96cc9SChristoph Hellwig iocb->ki_pos = iter.pos;
980219580eeSChristoph Hellwig return ret;
981afc51aaaSDarrick J. Wong }
982afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
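/*
 * Editorial sketch, not part of this file: a minimal example of how a
 * filesystem might drive iomap_file_buffered_write() from its ->write_iter
 * method. All "myfs_*" names are hypothetical; real callers (e.g. xfs,
 * gfs2) also handle IOCB_NOWAIT locking fallback and more.
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */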
983afc51aaaSDarrick J. Wong
9844ce02c67SRitesh Harjani (IBM) static int iomap_write_delalloc_ifs_punch(struct inode *inode,
9854ce02c67SRitesh Harjani (IBM) struct folio *folio, loff_t start_byte, loff_t end_byte,
9864ce02c67SRitesh Harjani (IBM) iomap_punch_t punch)
9874ce02c67SRitesh Harjani (IBM) {
9884ce02c67SRitesh Harjani (IBM) unsigned int first_blk, last_blk, i;
9894ce02c67SRitesh Harjani (IBM) loff_t last_byte;
9904ce02c67SRitesh Harjani (IBM) u8 blkbits = inode->i_blkbits;
9914ce02c67SRitesh Harjani (IBM) struct iomap_folio_state *ifs;
9924ce02c67SRitesh Harjani (IBM) int ret = 0;
9934ce02c67SRitesh Harjani (IBM)
9944ce02c67SRitesh Harjani (IBM) /*
9954ce02c67SRitesh Harjani (IBM) * When we have per-block dirty tracking, there can be
9964ce02c67SRitesh Harjani (IBM) * blocks within a folio which are marked uptodate
9974ce02c67SRitesh Harjani (IBM) * but not dirty. In that case it is necessary to punch
9984ce02c67SRitesh Harjani (IBM) * out such blocks to avoid leaking any delalloc blocks.
9994ce02c67SRitesh Harjani (IBM) */
10004ce02c67SRitesh Harjani (IBM) ifs = folio->private;
10014ce02c67SRitesh Harjani (IBM) if (!ifs)
10024ce02c67SRitesh Harjani (IBM) return ret;
10034ce02c67SRitesh Harjani (IBM)
10044ce02c67SRitesh Harjani (IBM) last_byte = min_t(loff_t, end_byte - 1,
10054ce02c67SRitesh Harjani (IBM) folio_pos(folio) + folio_size(folio) - 1);
10064ce02c67SRitesh Harjani (IBM) first_blk = offset_in_folio(folio, start_byte) >> blkbits;
10074ce02c67SRitesh Harjani (IBM) last_blk = offset_in_folio(folio, last_byte) >> blkbits;
10084ce02c67SRitesh Harjani (IBM) for (i = first_blk; i <= last_blk; i++) {
10094ce02c67SRitesh Harjani (IBM) if (!ifs_block_is_dirty(folio, ifs, i)) {
10104ce02c67SRitesh Harjani (IBM) ret = punch(inode, folio_pos(folio) + (i << blkbits),
10114ce02c67SRitesh Harjani (IBM) 1 << blkbits);
10124ce02c67SRitesh Harjani (IBM) if (ret)
10134ce02c67SRitesh Harjani (IBM) return ret;
10144ce02c67SRitesh Harjani (IBM) }
10154ce02c67SRitesh Harjani (IBM) }
10164ce02c67SRitesh Harjani (IBM)
10174ce02c67SRitesh Harjani (IBM) return ret;
10184ce02c67SRitesh Harjani (IBM) }
10194ce02c67SRitesh Harjani (IBM)
10204ce02c67SRitesh Harjani (IBM)
10217f79d85bSRitesh Harjani (IBM) static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
10227f79d85bSRitesh Harjani (IBM) loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
10237f79d85bSRitesh Harjani (IBM) iomap_punch_t punch)
10247f79d85bSRitesh Harjani (IBM) {
10257f79d85bSRitesh Harjani (IBM) int ret = 0;
10267f79d85bSRitesh Harjani (IBM)
10277f79d85bSRitesh Harjani (IBM) if (!folio_test_dirty(folio))
10287f79d85bSRitesh Harjani (IBM) return ret;
10297f79d85bSRitesh Harjani (IBM)
10307f79d85bSRitesh Harjani (IBM) /* if dirty, punch out everything up to start_byte */
10317f79d85bSRitesh Harjani (IBM) if (start_byte > *punch_start_byte) {
10327f79d85bSRitesh Harjani (IBM) ret = punch(inode, *punch_start_byte,
10337f79d85bSRitesh Harjani (IBM) start_byte - *punch_start_byte);
10347f79d85bSRitesh Harjani (IBM) if (ret)
10357f79d85bSRitesh Harjani (IBM) return ret;
10367f79d85bSRitesh Harjani (IBM) }
10377f79d85bSRitesh Harjani (IBM)
10384ce02c67SRitesh Harjani (IBM) /* Punch non-dirty blocks within folio */
10394ce02c67SRitesh Harjani (IBM) ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte,
10404ce02c67SRitesh Harjani (IBM) end_byte, punch);
10414ce02c67SRitesh Harjani (IBM) if (ret)
10424ce02c67SRitesh Harjani (IBM) return ret;
10434ce02c67SRitesh Harjani (IBM)
10447f79d85bSRitesh Harjani (IBM) /*
10457f79d85bSRitesh Harjani (IBM) * Make sure the next punch start is correctly bound to
10467f79d85bSRitesh Harjani (IBM) * the end of this data range, not the end of the folio.
10477f79d85bSRitesh Harjani (IBM) */
10487f79d85bSRitesh Harjani (IBM) *punch_start_byte = min_t(loff_t, end_byte,
10497f79d85bSRitesh Harjani (IBM) folio_pos(folio) + folio_size(folio));
10507f79d85bSRitesh Harjani (IBM)
10517f79d85bSRitesh Harjani (IBM) return ret;
10527f79d85bSRitesh Harjani (IBM) }
10537f79d85bSRitesh Harjani (IBM)
10549c7babf9SDave Chinner /*
1055f43dc4dcSDave Chinner * Scan the data range passed to us for dirty page cache folios. If we find a
1056684f7e6dSGeert Uytterhoeven * dirty folio, punch out the preceding range and update the offset from which
1057f43dc4dcSDave Chinner * the next punch will start.
1058f43dc4dcSDave Chinner *
1059f43dc4dcSDave Chinner * We can punch out storage reservations under clean pages because they either
1060f43dc4dcSDave Chinner * contain data that has been written back - in which case the delalloc punch
1061f43dc4dcSDave Chinner * over that range is a no-op - or they have been instantiated by read faults,
1062f43dc4dcSDave Chinner * in which case they contain zeroes and we can remove the delalloc backing
1063f43dc4dcSDave Chinner * range; any new writes to those pages will do the normal hole filling.
1064f43dc4dcSDave Chinner *
1065f43dc4dcSDave Chinner * This makes the logic simple: we need to keep the delalloc extents only
1066f43dc4dcSDave Chinner * over the dirty ranges of the page cache.
1067f43dc4dcSDave Chinner *
1068f43dc4dcSDave Chinner * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
1069f43dc4dcSDave Chinner * simplify range iterations.
1070f43dc4dcSDave Chinner */
1071f43dc4dcSDave Chinner static int iomap_write_delalloc_scan(struct inode *inode,
1072f43dc4dcSDave Chinner loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
10730af2b37dSRitesh Harjani (IBM) iomap_punch_t punch)
1074f43dc4dcSDave Chinner {
1075f43dc4dcSDave Chinner while (start_byte < end_byte) {
1076f43dc4dcSDave Chinner struct folio *folio;
10777f79d85bSRitesh Harjani (IBM) int ret;
1078f43dc4dcSDave Chinner
1079f43dc4dcSDave Chinner /* grab the locked folio */
1080f43dc4dcSDave Chinner folio = filemap_lock_folio(inode->i_mapping,
1081f43dc4dcSDave Chinner start_byte >> PAGE_SHIFT);
108266dabbb6SChristoph Hellwig if (IS_ERR(folio)) {
1083f43dc4dcSDave Chinner start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
1084f43dc4dcSDave Chinner PAGE_SIZE;
1085f43dc4dcSDave Chinner continue;
1086f43dc4dcSDave Chinner }
1087f43dc4dcSDave Chinner
10887f79d85bSRitesh Harjani (IBM) ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte,
10897f79d85bSRitesh Harjani (IBM) start_byte, end_byte, punch);
10907f79d85bSRitesh Harjani (IBM) if (ret) {
1091f43dc4dcSDave Chinner folio_unlock(folio);
1092f43dc4dcSDave Chinner folio_put(folio);
10937f79d85bSRitesh Harjani (IBM) return ret;
1094f43dc4dcSDave Chinner }
1095f43dc4dcSDave Chinner
1096f43dc4dcSDave Chinner /* move offset to start of next folio in range */
1097f43dc4dcSDave Chinner start_byte = folio_next_index(folio) << PAGE_SHIFT;
1098f43dc4dcSDave Chinner folio_unlock(folio);
1099f43dc4dcSDave Chinner folio_put(folio);
1100f43dc4dcSDave Chinner }
1101f43dc4dcSDave Chinner return 0;
1102f43dc4dcSDave Chinner }
1103f43dc4dcSDave Chinner
1104f43dc4dcSDave Chinner /*
1105f43dc4dcSDave Chinner * Punch out all the delalloc blocks in the range given except for those that
1106f43dc4dcSDave Chinner * have dirty data still pending in the page cache - those are going to be
1107f43dc4dcSDave Chinner * written and so must still retain the delalloc backing for writeback.
1108f43dc4dcSDave Chinner *
1109f43dc4dcSDave Chinner * As we are scanning the page cache for data, we don't need to reimplement the
1110f43dc4dcSDave Chinner * wheel - mapping_seek_hole_data() does exactly what we need to identify the
1111f43dc4dcSDave Chinner * start and end of data ranges correctly even for sub-folio block sizes. This
1112f43dc4dcSDave Chinner * byte range based iteration is especially convenient because it means we
1113f43dc4dcSDave Chinner * don't have to care about variable size folios, nor where the start or end of
1114f43dc4dcSDave Chinner * the data range lies within a folio, whether they lie within the same folio,
1115f43dc4dcSDave Chinner * or even whether there are multiple discontiguous data ranges within the folio.
1116f43dc4dcSDave Chinner *
1117f43dc4dcSDave Chinner * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
1118f43dc4dcSDave Chinner * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
1119f43dc4dcSDave Chinner * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
1120f43dc4dcSDave Chinner * date. A write page fault can then mark it dirty. If we then fail a write()
1121f43dc4dcSDave Chinner * beyond EOF into that up to date cached range, we allocate a delalloc block
1122f43dc4dcSDave Chinner * beyond EOF and then have to punch it out. Because the range is up to date,
1123f43dc4dcSDave Chinner * mapping_seek_hole_data() will return it, and we will skip the punch because
1124f43dc4dcSDave Chinner * the folio is dirty. This is incorrect - we always need to punch out delalloc
1125f43dc4dcSDave Chinner * beyond EOF in this case as writeback will never write back and convert that
1126f43dc4dcSDave Chinner * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
1127f43dc4dcSDave Chinner * resulting in always punching out the range from the EOF to the end of the
1128f43dc4dcSDave Chinner * range the iomap spans.
1129f43dc4dcSDave Chinner *
1130f43dc4dcSDave Chinner * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because
1131f43dc4dcSDave Chinner * that matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
1132f43dc4dcSDave Chinner * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
1133f43dc4dcSDave Chinner * returns the end of the data range (data_end). Using closed intervals would
1134f43dc4dcSDave Chinner * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
1135f43dc4dcSDave Chinner * the code to subtle off-by-one bugs....
1136f43dc4dcSDave Chinner */
1137f43dc4dcSDave Chinner static int iomap_write_delalloc_release(struct inode *inode,
11380af2b37dSRitesh Harjani (IBM) loff_t start_byte, loff_t end_byte, iomap_punch_t punch)
1139f43dc4dcSDave Chinner {
1140f43dc4dcSDave Chinner loff_t punch_start_byte = start_byte;
1141f43dc4dcSDave Chinner loff_t scan_end_byte = min(i_size_read(inode), end_byte);
1142f43dc4dcSDave Chinner int error = 0;
1143f43dc4dcSDave Chinner
1144f43dc4dcSDave Chinner /*
1145f43dc4dcSDave Chinner * Lock the mapping to avoid races with page faults re-instantiating
1146f43dc4dcSDave Chinner * folios and dirtying them via ->page_mkwrite whilst we walk the
1147f43dc4dcSDave Chinner * cache and perform delalloc extent removal. Failing to do this can
1148f43dc4dcSDave Chinner * leave dirty pages with no space reservation in the cache.
1149f43dc4dcSDave Chinner */
1150f43dc4dcSDave Chinner filemap_invalidate_lock(inode->i_mapping);
1151f43dc4dcSDave Chinner while (start_byte < scan_end_byte) {
1152f43dc4dcSDave Chinner loff_t data_end;
1153f43dc4dcSDave Chinner
1154f43dc4dcSDave Chinner start_byte = mapping_seek_hole_data(inode->i_mapping,
1155f43dc4dcSDave Chinner start_byte, scan_end_byte, SEEK_DATA);
1156f43dc4dcSDave Chinner /*
1157f43dc4dcSDave Chinner * If there is no more data to scan, all that is left is to
1158f43dc4dcSDave Chinner * punch out the remaining range.
1159f43dc4dcSDave Chinner */
1160f43dc4dcSDave Chinner if (start_byte == -ENXIO || start_byte == scan_end_byte)
1161f43dc4dcSDave Chinner break;
1162f43dc4dcSDave Chinner if (start_byte < 0) {
1163f43dc4dcSDave Chinner error = start_byte;
1164f43dc4dcSDave Chinner goto out_unlock;
1165f43dc4dcSDave Chinner }
1166f43dc4dcSDave Chinner WARN_ON_ONCE(start_byte < punch_start_byte);
1167f43dc4dcSDave Chinner WARN_ON_ONCE(start_byte > scan_end_byte);
1168f43dc4dcSDave Chinner
1169f43dc4dcSDave Chinner /*
1170f43dc4dcSDave Chinner * We find the end of this contiguous cached data range by
1171f43dc4dcSDave Chinner * seeking from start_byte to the beginning of the next hole.
1172f43dc4dcSDave Chinner */
1173f43dc4dcSDave Chinner data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
1174f43dc4dcSDave Chinner scan_end_byte, SEEK_HOLE);
1175f43dc4dcSDave Chinner if (data_end < 0) {
1176f43dc4dcSDave Chinner error = data_end;
1177f43dc4dcSDave Chinner goto out_unlock;
1178f43dc4dcSDave Chinner }
1179f43dc4dcSDave Chinner WARN_ON_ONCE(data_end <= start_byte);
1180f43dc4dcSDave Chinner WARN_ON_ONCE(data_end > scan_end_byte);
1181f43dc4dcSDave Chinner
1182f43dc4dcSDave Chinner error = iomap_write_delalloc_scan(inode, &punch_start_byte,
1183f43dc4dcSDave Chinner start_byte, data_end, punch);
1184f43dc4dcSDave Chinner if (error)
1185f43dc4dcSDave Chinner goto out_unlock;
1186f43dc4dcSDave Chinner
1187f43dc4dcSDave Chinner /* The next data search starts at the end of this one. */
1188f43dc4dcSDave Chinner start_byte = data_end;
1189f43dc4dcSDave Chinner }
1190f43dc4dcSDave Chinner
1191f43dc4dcSDave Chinner if (punch_start_byte < end_byte)
1192f43dc4dcSDave Chinner error = punch(inode, punch_start_byte,
1193f43dc4dcSDave Chinner end_byte - punch_start_byte);
1194f43dc4dcSDave Chinner out_unlock:
1195f43dc4dcSDave Chinner filemap_invalidate_unlock(inode->i_mapping);
1196f43dc4dcSDave Chinner return error;
1197f43dc4dcSDave Chinner }
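/*
 * Worked example (editorial): suppose 4k folios, i_size at or beyond 16384,
 * a delalloc extent backing [0, 16384), clean uptodate folios cached over
 * [0, 8192) and a dirty folio over [8192, 12288). SEEK_DATA from 0 returns
 * 0 and SEEK_HOLE returns 12288, so we scan [0, 12288): the clean folios
 * are skipped, and hitting the dirty folio punches the preceding clean
 * range [0, 8192) and moves punch_start_byte to 12288. No further data
 * exists, so the final punch removes [12288, 16384), leaving only the
 * dirty folio's reservation intact.
 */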
1198f43dc4dcSDave Chinner
1199f43dc4dcSDave Chinner /*
12009c7babf9SDave Chinner * When a short write occurs, the filesystem may need to use its ->iomap_end
12019c7babf9SDave Chinner * method to remove reserved space that was allocated in ->iomap_begin. For
12029c7babf9SDave Chinner * filesystems that use delayed allocation, we need to punch out delalloc
12039c7babf9SDave Chinner * extents from the range that are not dirty in the page cache. As the write can
12049c7babf9SDave Chinner * race with page faults, there can be dirty pages outside the range of a
12059c7babf9SDave Chinner * short write but still within the delalloc extent
12069c7babf9SDave Chinner * allocated for this iomap.
12079c7babf9SDave Chinner *
12089c7babf9SDave Chinner * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
1209f43dc4dcSDave Chinner * simplify range iterations.
1210f43dc4dcSDave Chinner *
1211f43dc4dcSDave Chinner * The punch() callback *must* only punch delalloc extents in the range passed
1212f43dc4dcSDave Chinner * to it. It must skip over all other types of extents in the range and leave
1213f43dc4dcSDave Chinner * them completely unchanged. It must do this punch atomically with respect to
1214f43dc4dcSDave Chinner * other extent modifications.
1215f43dc4dcSDave Chinner *
1216f43dc4dcSDave Chinner * The punch() callback may be called with a folio locked to prevent writeback
1217f43dc4dcSDave Chinner * extent allocation racing at the edge of the range we are currently punching.
1218f43dc4dcSDave Chinner * The locked folio may or may not cover the range being punched, so it is not
1219f43dc4dcSDave Chinner * safe for the punch() callback to lock folios itself.
1220f43dc4dcSDave Chinner *
1221f43dc4dcSDave Chinner * Lock order is:
1222f43dc4dcSDave Chinner *
1223f43dc4dcSDave Chinner * inode->i_rwsem (shared or exclusive)
1224f43dc4dcSDave Chinner * inode->i_mapping->invalidate_lock (exclusive)
1225f43dc4dcSDave Chinner * folio_lock()
1226f43dc4dcSDave Chinner * ->punch
1227f43dc4dcSDave Chinner * internal filesystem allocation lock
12289c7babf9SDave Chinner */
12299c7babf9SDave Chinner int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
12309c7babf9SDave Chinner struct iomap *iomap, loff_t pos, loff_t length,
12310af2b37dSRitesh Harjani (IBM) ssize_t written, iomap_punch_t punch)
12329c7babf9SDave Chinner {
12339c7babf9SDave Chinner loff_t start_byte;
12349c7babf9SDave Chinner loff_t end_byte;
1235302efbefSLu Hongfei unsigned int blocksize = i_blocksize(inode);
12369c7babf9SDave Chinner
12379c7babf9SDave Chinner if (iomap->type != IOMAP_DELALLOC)
12389c7babf9SDave Chinner return 0;
12399c7babf9SDave Chinner
12409c7babf9SDave Chinner /* If we didn't reserve the blocks, we're not allowed to punch them. */
12419c7babf9SDave Chinner if (!(iomap->flags & IOMAP_F_NEW))
12429c7babf9SDave Chinner return 0;
12439c7babf9SDave Chinner
12449c7babf9SDave Chinner /*
12459c7babf9SDave Chinner * start_byte refers to the first unused block after a short write. If
12469c7babf9SDave Chinner * nothing was written, round offset down to point at the first block in
12479c7babf9SDave Chinner * the range.
12489c7babf9SDave Chinner */
12499c7babf9SDave Chinner if (unlikely(!written))
12509c7babf9SDave Chinner start_byte = round_down(pos, blocksize);
12519c7babf9SDave Chinner else
12529c7babf9SDave Chinner start_byte = round_up(pos + written, blocksize);
12539c7babf9SDave Chinner end_byte = round_up(pos + length, blocksize);
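	/*
	 * Worked example (editorial): with 1k blocks, pos = 2500,
	 * written = 300 and length = 2000 give
	 * start_byte = round_up(2800, 1024) = 3072 and
	 * end_byte = round_up(4500, 1024) = 5120, i.e. we consider
	 * punching the delalloc range [3072, 5120).
	 */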
12549c7babf9SDave Chinner
12559c7babf9SDave Chinner /* Nothing to do if we've written the entire delalloc extent */
12569c7babf9SDave Chinner if (start_byte >= end_byte)
12579c7babf9SDave Chinner return 0;
12589c7babf9SDave Chinner
1259f43dc4dcSDave Chinner return iomap_write_delalloc_release(inode, start_byte, end_byte,
1260f43dc4dcSDave Chinner punch);
12619c7babf9SDave Chinner }
12629c7babf9SDave Chinner EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
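/*
 * Editorial sketch, not part of this file: the shape of a delayed
 * allocation filesystem's ->iomap_end and punch callback wired up to the
 * helper above. The "myfs_*" names are hypothetical; compare
 * xfs_buffered_write_iomap_end() for a real caller. The punch callback
 * must only remove delalloc extents within the range it is given.
 *
 *	static int myfs_punch_delalloc(struct inode *inode, loff_t offset,
 *			loff_t length)
 *	{
 *		return myfs_remove_delalloc_extents(inode, offset, length);
 *	}
 *
 *	static int myfs_buffered_write_iomap_end(struct inode *inode,
 *			loff_t pos, loff_t length, ssize_t written,
 *			unsigned flags, struct iomap *iomap)
 *	{
 *		return iomap_file_buffered_write_punch_delalloc(inode, iomap,
 *				pos, length, written, myfs_punch_delalloc);
 *	}
 */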
12639c7babf9SDave Chinner
12648fc274d1SChristoph Hellwig static loff_t iomap_unshare_iter(struct iomap_iter *iter)
1265afc51aaaSDarrick J. Wong {
12668fc274d1SChristoph Hellwig struct iomap *iomap = &iter->iomap;
1267fad0a1abSChristoph Hellwig const struct iomap *srcmap = iomap_iter_srcmap(iter);
12688fc274d1SChristoph Hellwig loff_t pos = iter->pos;
12698fc274d1SChristoph Hellwig loff_t length = iomap_length(iter);
1270d4ff3b2eSMatthew Wilcox (Oracle) loff_t written = 0;
1271afc51aaaSDarrick J. Wong
12723590c4d8SChristoph Hellwig /* don't bother with blocks that are not shared to start with */
12733590c4d8SChristoph Hellwig if (!(iomap->flags & IOMAP_F_SHARED))
12743590c4d8SChristoph Hellwig return length;
12753590c4d8SChristoph Hellwig /* don't bother with holes or unwritten extents */
1276c039b997SGoldwyn Rodrigues if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
12773590c4d8SChristoph Hellwig return length;
12783590c4d8SChristoph Hellwig
1279afc51aaaSDarrick J. Wong do {
1280bc6123a8SMatthew Wilcox (Oracle) struct folio *folio;
1281a5f31a50SDarrick J. Wong int status;
1282a5f31a50SDarrick J. Wong size_t offset;
1283a5f31a50SDarrick J. Wong size_t bytes = min_t(u64, SIZE_MAX, length);
1284afc51aaaSDarrick J. Wong
1285bc6123a8SMatthew Wilcox (Oracle) status = iomap_write_begin(iter, pos, bytes, &folio);
1286afc51aaaSDarrick J. Wong if (unlikely(status))
1287afc51aaaSDarrick J. Wong return status;
1288a5f31a50SDarrick J. Wong if (iomap->flags & IOMAP_F_STALE)
1289d7b64041SDave Chinner break;
1290afc51aaaSDarrick J. Wong
1291a5f31a50SDarrick J. Wong offset = offset_in_folio(folio, pos);
1292a5f31a50SDarrick J. Wong if (bytes > folio_size(folio) - offset)
1293a5f31a50SDarrick J. Wong bytes = folio_size(folio) - offset;
1294a5f31a50SDarrick J. Wong
1295a5f31a50SDarrick J. Wong bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
1296a5f31a50SDarrick J. Wong if (WARN_ON_ONCE(bytes == 0))
1297afc51aaaSDarrick J. Wong return -EIO;
1298afc51aaaSDarrick J. Wong
1299afc51aaaSDarrick J. Wong cond_resched();
1300afc51aaaSDarrick J. Wong
1301a5f31a50SDarrick J. Wong pos += bytes;
1302a5f31a50SDarrick J. Wong written += bytes;
1303a5f31a50SDarrick J. Wong length -= bytes;
1304afc51aaaSDarrick J. Wong
13058fc274d1SChristoph Hellwig balance_dirty_pages_ratelimited(iter->inode->i_mapping);
1306a5f31a50SDarrick J. Wong } while (length > 0);
1307afc51aaaSDarrick J. Wong
1308afc51aaaSDarrick J. Wong return written;
1309afc51aaaSDarrick J. Wong }
1310afc51aaaSDarrick J. Wong
1311afc51aaaSDarrick J. Wong int
13123590c4d8SChristoph Hellwig iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
1313afc51aaaSDarrick J. Wong const struct iomap_ops *ops)
1314afc51aaaSDarrick J. Wong {
13158fc274d1SChristoph Hellwig struct iomap_iter iter = {
13168fc274d1SChristoph Hellwig .inode = inode,
13178fc274d1SChristoph Hellwig .pos = pos,
13188fc274d1SChristoph Hellwig .len = len,
1319b74b1293SChristoph Hellwig .flags = IOMAP_WRITE | IOMAP_UNSHARE,
13208fc274d1SChristoph Hellwig };
13218fc274d1SChristoph Hellwig int ret;
1322afc51aaaSDarrick J. Wong
13238fc274d1SChristoph Hellwig while ((ret = iomap_iter(&iter, ops)) > 0)
13248fc274d1SChristoph Hellwig iter.processed = iomap_unshare_iter(&iter);
1325afc51aaaSDarrick J. Wong return ret;
1326afc51aaaSDarrick J. Wong }
13273590c4d8SChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_file_unshare);
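/*
 * Editorial sketch, not part of this file: a reflink-capable filesystem
 * might call iomap_file_unshare() while servicing
 * fallocate(FALLOC_FL_UNSHARE_RANGE); compare xfs_reflink_unshare(). The
 * myfs_iomap_ops name is hypothetical:
 *
 *	error = iomap_file_unshare(inode, offset, len, &myfs_iomap_ops);
 *	if (!error)
 *		error = filemap_write_and_wait_range(inode->i_mapping,
 *				offset, offset + len - 1);
 */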
1328afc51aaaSDarrick J. Wong
13292aa3048eSChristoph Hellwig static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
1330afc51aaaSDarrick J. Wong {
1331fad0a1abSChristoph Hellwig const struct iomap *srcmap = iomap_iter_srcmap(iter);
13322aa3048eSChristoph Hellwig loff_t pos = iter->pos;
13332aa3048eSChristoph Hellwig loff_t length = iomap_length(iter);
1334afc51aaaSDarrick J. Wong loff_t written = 0;
1335afc51aaaSDarrick J. Wong
1336afc51aaaSDarrick J. Wong /* already zeroed? we're done. */
1337c039b997SGoldwyn Rodrigues if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
133881ee8e52SMatthew Wilcox (Oracle) return length;
1339afc51aaaSDarrick J. Wong
1340afc51aaaSDarrick J. Wong do {
13414d7bd0ebSMatthew Wilcox (Oracle) struct folio *folio;
13424d7bd0ebSMatthew Wilcox (Oracle) int status;
13434d7bd0ebSMatthew Wilcox (Oracle) size_t offset;
13444d7bd0ebSMatthew Wilcox (Oracle) size_t bytes = min_t(u64, SIZE_MAX, length);
1345afc51aaaSDarrick J. Wong
13464d7bd0ebSMatthew Wilcox (Oracle) status = iomap_write_begin(iter, pos, bytes, &folio);
13474d7bd0ebSMatthew Wilcox (Oracle) if (status)
13484d7bd0ebSMatthew Wilcox (Oracle) return status;
1349d7b64041SDave Chinner if (iter->iomap.flags & IOMAP_F_STALE)
1350d7b64041SDave Chinner break;
13514d7bd0ebSMatthew Wilcox (Oracle)
13524d7bd0ebSMatthew Wilcox (Oracle) offset = offset_in_folio(folio, pos);
13534d7bd0ebSMatthew Wilcox (Oracle) if (bytes > folio_size(folio) - offset)
13544d7bd0ebSMatthew Wilcox (Oracle) bytes = folio_size(folio) - offset;
13554d7bd0ebSMatthew Wilcox (Oracle)
13564d7bd0ebSMatthew Wilcox (Oracle) folio_zero_range(folio, offset, bytes);
13574d7bd0ebSMatthew Wilcox (Oracle) folio_mark_accessed(folio);
13584d7bd0ebSMatthew Wilcox (Oracle)
13594d7bd0ebSMatthew Wilcox (Oracle) bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
13604d7bd0ebSMatthew Wilcox (Oracle) if (WARN_ON_ONCE(bytes == 0))
13614d7bd0ebSMatthew Wilcox (Oracle) return -EIO;
1362afc51aaaSDarrick J. Wong
1363afc51aaaSDarrick J. Wong pos += bytes;
136481ee8e52SMatthew Wilcox (Oracle) length -= bytes;
1365afc51aaaSDarrick J. Wong written += bytes;
136681ee8e52SMatthew Wilcox (Oracle) } while (length > 0);
1367afc51aaaSDarrick J. Wong
136898eb8d95SKaixu Xia if (did_zero)
136998eb8d95SKaixu Xia *did_zero = true;
1370afc51aaaSDarrick J. Wong return written;
1371afc51aaaSDarrick J. Wong }
1372afc51aaaSDarrick J. Wong
1373afc51aaaSDarrick J. Wong int
1374afc51aaaSDarrick J. Wong iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1375afc51aaaSDarrick J. Wong const struct iomap_ops *ops)
1376afc51aaaSDarrick J. Wong {
13772aa3048eSChristoph Hellwig struct iomap_iter iter = {
13782aa3048eSChristoph Hellwig .inode = inode,
13792aa3048eSChristoph Hellwig .pos = pos,
13802aa3048eSChristoph Hellwig .len = len,
13812aa3048eSChristoph Hellwig .flags = IOMAP_ZERO,
13822aa3048eSChristoph Hellwig };
13832aa3048eSChristoph Hellwig int ret;
1384afc51aaaSDarrick J. Wong
13852aa3048eSChristoph Hellwig while ((ret = iomap_iter(&iter, ops)) > 0)
13862aa3048eSChristoph Hellwig iter.processed = iomap_zero_iter(&iter, did_zero);
1387afc51aaaSDarrick J. Wong return ret;
1388afc51aaaSDarrick J. Wong }
1389afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_zero_range);
1390afc51aaaSDarrick J. Wong
1391afc51aaaSDarrick J. Wong int
1392afc51aaaSDarrick J. Wong iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1393afc51aaaSDarrick J. Wong const struct iomap_ops *ops)
1394afc51aaaSDarrick J. Wong {
1395afc51aaaSDarrick J. Wong unsigned int blocksize = i_blocksize(inode);
1396afc51aaaSDarrick J. Wong unsigned int off = pos & (blocksize - 1);
1397afc51aaaSDarrick J. Wong
1398afc51aaaSDarrick J. Wong /* Block boundary? Nothing to do */
1399afc51aaaSDarrick J. Wong if (!off)
1400afc51aaaSDarrick J. Wong return 0;
1401afc51aaaSDarrick J. Wong return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
1402afc51aaaSDarrick J. Wong }
1403afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_truncate_page);
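/*
 * Editorial sketch, not part of this file: truncate paths typically use
 * iomap_truncate_page() to zero the partial block at the new EOF before
 * reducing i_size, roughly (myfs_iomap_ops is hypothetical; compare
 * xfs_truncate_page() called from the setattr size-change path):
 *
 *	bool did_zero = false;
 *	int error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&myfs_iomap_ops);
 */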
1404afc51aaaSDarrick J. Wong
1405ea0f843aSMatthew Wilcox (Oracle) static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
1406ea0f843aSMatthew Wilcox (Oracle) struct folio *folio)
1407afc51aaaSDarrick J. Wong {
1408253564baSChristoph Hellwig loff_t length = iomap_length(iter);
1409afc51aaaSDarrick J. Wong int ret;
1410afc51aaaSDarrick J. Wong
1411253564baSChristoph Hellwig if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
1412d1bd0b4eSMatthew Wilcox (Oracle) ret = __block_write_begin_int(folio, iter->pos, length, NULL,
1413253564baSChristoph Hellwig &iter->iomap);
1414afc51aaaSDarrick J. Wong if (ret)
1415afc51aaaSDarrick J. Wong return ret;
1416ea0f843aSMatthew Wilcox (Oracle) block_commit_write(&folio->page, 0, length);
1417afc51aaaSDarrick J. Wong } else {
1418ea0f843aSMatthew Wilcox (Oracle) WARN_ON_ONCE(!folio_test_uptodate(folio));
1419ea0f843aSMatthew Wilcox (Oracle) folio_mark_dirty(folio);
1420afc51aaaSDarrick J. Wong }
1421afc51aaaSDarrick J. Wong
1422afc51aaaSDarrick J. Wong return length;
1423afc51aaaSDarrick J. Wong }
1424afc51aaaSDarrick J. Wong
1425afc51aaaSDarrick J. Wong vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
1426afc51aaaSDarrick J. Wong {
1427253564baSChristoph Hellwig struct iomap_iter iter = {
1428253564baSChristoph Hellwig .inode = file_inode(vmf->vma->vm_file),
1429253564baSChristoph Hellwig .flags = IOMAP_WRITE | IOMAP_FAULT,
1430253564baSChristoph Hellwig };
1431ea0f843aSMatthew Wilcox (Oracle) struct folio *folio = page_folio(vmf->page);
1432afc51aaaSDarrick J. Wong ssize_t ret;
1433afc51aaaSDarrick J. Wong
1434ea0f843aSMatthew Wilcox (Oracle) folio_lock(folio);
1435ea0f843aSMatthew Wilcox (Oracle) ret = folio_mkwrite_check_truncate(folio, iter.inode);
1436243145bcSAndreas Gruenbacher if (ret < 0)
1437afc51aaaSDarrick J. Wong goto out_unlock;
1438ea0f843aSMatthew Wilcox (Oracle) iter.pos = folio_pos(folio);
1439253564baSChristoph Hellwig iter.len = ret;
1440253564baSChristoph Hellwig while ((ret = iomap_iter(&iter, ops)) > 0)
1441ea0f843aSMatthew Wilcox (Oracle) iter.processed = iomap_folio_mkwrite_iter(&iter, folio);
1442afc51aaaSDarrick J. Wong
1443253564baSChristoph Hellwig if (ret < 0)
1444afc51aaaSDarrick J. Wong goto out_unlock;
1445ea0f843aSMatthew Wilcox (Oracle) folio_wait_stable(folio);
1446afc51aaaSDarrick J. Wong return VM_FAULT_LOCKED;
1447afc51aaaSDarrick J. Wong out_unlock:
1448ea0f843aSMatthew Wilcox (Oracle) folio_unlock(folio);
14492ba39cc4SChristoph Hellwig return vmf_fs_error(ret);
1450afc51aaaSDarrick J. Wong }
1451afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
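/*
 * Editorial sketch, not part of this file: wiring iomap_page_mkwrite()
 * into a filesystem's vm_operations_struct ("myfs_*" names hypothetical):
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		ret = iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 *
 *	static const struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */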
1452598ecfbaSChristoph Hellwig
14538ffd74e9SMatthew Wilcox (Oracle) static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
14548ffd74e9SMatthew Wilcox (Oracle) size_t len, int error)
1455598ecfbaSChristoph Hellwig {
145604f52c4eSRitesh Harjani (IBM) struct iomap_folio_state *ifs = folio->private;
1457598ecfbaSChristoph Hellwig
1458598ecfbaSChristoph Hellwig if (error) {
14598ffd74e9SMatthew Wilcox (Oracle) folio_set_error(folio);
1460b69eea82SDarrick J. Wong mapping_set_error(inode->i_mapping, error);
1461598ecfbaSChristoph Hellwig }
1462598ecfbaSChristoph Hellwig
146304f52c4eSRitesh Harjani (IBM) WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
146404f52c4eSRitesh Harjani (IBM) WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
1465598ecfbaSChristoph Hellwig
146604f52c4eSRitesh Harjani (IBM) if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
14678ffd74e9SMatthew Wilcox (Oracle) folio_end_writeback(folio);
1468598ecfbaSChristoph Hellwig }
1469598ecfbaSChristoph Hellwig
1470598ecfbaSChristoph Hellwig /*
1471598ecfbaSChristoph Hellwig * We're now finished for good with this ioend structure. Update the page
1472598ecfbaSChristoph Hellwig * state, release holds on bios, and finally free up memory. Do not use the
1473598ecfbaSChristoph Hellwig * ioend after this.
1474598ecfbaSChristoph Hellwig */
1475ebb7fb15SDave Chinner static u32
1476598ecfbaSChristoph Hellwig iomap_finish_ioend(struct iomap_ioend *ioend, int error)
1477598ecfbaSChristoph Hellwig {
1478598ecfbaSChristoph Hellwig struct inode *inode = ioend->io_inode;
1479598ecfbaSChristoph Hellwig struct bio *bio = &ioend->io_inline_bio;
1480598ecfbaSChristoph Hellwig struct bio *last = ioend->io_bio, *next;
1481598ecfbaSChristoph Hellwig u64 start = bio->bi_iter.bi_sector;
1482c275779fSZorro Lang loff_t offset = ioend->io_offset;
1483598ecfbaSChristoph Hellwig bool quiet = bio_flagged(bio, BIO_QUIET);
1484ebb7fb15SDave Chinner u32 folio_count = 0;
1485598ecfbaSChristoph Hellwig
1486598ecfbaSChristoph Hellwig for (bio = &ioend->io_inline_bio; bio; bio = next) {
14878ffd74e9SMatthew Wilcox (Oracle) struct folio_iter fi;
1488598ecfbaSChristoph Hellwig
1489598ecfbaSChristoph Hellwig /*
1490598ecfbaSChristoph Hellwig * For the last bio, bi_private points to the ioend, so we
1491598ecfbaSChristoph Hellwig * need to explicitly end the iteration here.
1492598ecfbaSChristoph Hellwig */
1493598ecfbaSChristoph Hellwig if (bio == last)
1494598ecfbaSChristoph Hellwig next = NULL;
1495598ecfbaSChristoph Hellwig else
1496598ecfbaSChristoph Hellwig next = bio->bi_private;
1497598ecfbaSChristoph Hellwig
14988ffd74e9SMatthew Wilcox (Oracle) /* walk all folios in bio, ending page IO on them */
1499ebb7fb15SDave Chinner bio_for_each_folio_all(fi, bio) {
15008ffd74e9SMatthew Wilcox (Oracle) iomap_finish_folio_write(inode, fi.folio, fi.length,
15018ffd74e9SMatthew Wilcox (Oracle) error);
1502ebb7fb15SDave Chinner folio_count++;
1503ebb7fb15SDave Chinner }
1504598ecfbaSChristoph Hellwig bio_put(bio);
1505598ecfbaSChristoph Hellwig }
1506c275779fSZorro Lang /* The ioend has been freed by bio_put() */
1507598ecfbaSChristoph Hellwig
1508598ecfbaSChristoph Hellwig if (unlikely(error && !quiet)) {
1509598ecfbaSChristoph Hellwig printk_ratelimited(KERN_ERR
15109cd0ed63SDarrick J. Wong "%s: writeback error on inode %lu, offset %lld, sector %llu",
1511c275779fSZorro Lang inode->i_sb->s_id, inode->i_ino, offset, start);
1512598ecfbaSChristoph Hellwig }
1513ebb7fb15SDave Chinner return folio_count;
1514598ecfbaSChristoph Hellwig }
1515598ecfbaSChristoph Hellwig
1516ebb7fb15SDave Chinner /*
1517ebb7fb15SDave Chinner * Ioend completion routine for merged bios. This can only be called from task
1518ebb7fb15SDave Chinner * contexts as merged ioends can be of unbounded length. Hence we have to break up
1519ebb7fb15SDave Chinner * the writeback completions into manageable chunks to avoid long scheduler
1520ebb7fb15SDave Chinner * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
1521ebb7fb15SDave Chinner * good batch processing throughput without creating adverse scheduler latency
1522ebb7fb15SDave Chinner * conditions.
1523ebb7fb15SDave Chinner */
1524598ecfbaSChristoph Hellwig void
1525598ecfbaSChristoph Hellwig iomap_finish_ioends(struct iomap_ioend *ioend, int error)
1526598ecfbaSChristoph Hellwig {
1527598ecfbaSChristoph Hellwig struct list_head tmp;
1528ebb7fb15SDave Chinner u32 completions;
1529ebb7fb15SDave Chinner
1530ebb7fb15SDave Chinner might_sleep();
1531598ecfbaSChristoph Hellwig
1532598ecfbaSChristoph Hellwig list_replace_init(&ioend->io_list, &tmp);
1533ebb7fb15SDave Chinner completions = iomap_finish_ioend(ioend, error);
1534598ecfbaSChristoph Hellwig
1535598ecfbaSChristoph Hellwig while (!list_empty(&tmp)) {
1536ebb7fb15SDave Chinner if (completions > IOEND_BATCH_SIZE * 8) {
1537ebb7fb15SDave Chinner cond_resched();
1538ebb7fb15SDave Chinner completions = 0;
1539ebb7fb15SDave Chinner }
1540598ecfbaSChristoph Hellwig ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
1541598ecfbaSChristoph Hellwig list_del_init(&ioend->io_list);
1542ebb7fb15SDave Chinner completions += iomap_finish_ioend(ioend, error);
1543598ecfbaSChristoph Hellwig }
1544598ecfbaSChristoph Hellwig }
1545598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_finish_ioends);
1546598ecfbaSChristoph Hellwig
1547598ecfbaSChristoph Hellwig /*
1548598ecfbaSChristoph Hellwig * We can merge two adjacent ioends if they have the same set of work to do.
1549598ecfbaSChristoph Hellwig */
1550598ecfbaSChristoph Hellwig static bool
1551598ecfbaSChristoph Hellwig iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
1552598ecfbaSChristoph Hellwig {
1553598ecfbaSChristoph Hellwig if (ioend->io_bio->bi_status != next->io_bio->bi_status)
1554598ecfbaSChristoph Hellwig return false;
1555598ecfbaSChristoph Hellwig if ((ioend->io_flags & IOMAP_F_SHARED) ^
1556598ecfbaSChristoph Hellwig (next->io_flags & IOMAP_F_SHARED))
1557598ecfbaSChristoph Hellwig return false;
1558598ecfbaSChristoph Hellwig if ((ioend->io_type == IOMAP_UNWRITTEN) ^
1559598ecfbaSChristoph Hellwig (next->io_type == IOMAP_UNWRITTEN))
1560598ecfbaSChristoph Hellwig return false;
1561598ecfbaSChristoph Hellwig if (ioend->io_offset + ioend->io_size != next->io_offset)
1562598ecfbaSChristoph Hellwig return false;
1563ebb7fb15SDave Chinner /*
1564ebb7fb15SDave Chinner * Do not merge physically discontiguous ioends. The filesystem
1565ebb7fb15SDave Chinner * completion functions will have to iterate the physical
1566ebb7fb15SDave Chinner * discontiguities even if we merge the ioends at a logical level, so
1567ebb7fb15SDave Chinner * we don't gain anything by merging physical discontiguities here.
1568ebb7fb15SDave Chinner *
1569ebb7fb15SDave Chinner * We cannot use bio->bi_iter.bi_sector here as it is modified during
1570ebb7fb15SDave Chinner * submission so does not point to the start sector of the bio at
1571ebb7fb15SDave Chinner * completion.
1572ebb7fb15SDave Chinner */
1573ebb7fb15SDave Chinner if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
1574ebb7fb15SDave Chinner return false;
1575598ecfbaSChristoph Hellwig return true;
1576598ecfbaSChristoph Hellwig }
1577598ecfbaSChristoph Hellwig
1578598ecfbaSChristoph Hellwig void
15796e552494SBrian Foster iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
1580598ecfbaSChristoph Hellwig {
1581598ecfbaSChristoph Hellwig struct iomap_ioend *next;
1582598ecfbaSChristoph Hellwig
1583598ecfbaSChristoph Hellwig INIT_LIST_HEAD(&ioend->io_list);
1584598ecfbaSChristoph Hellwig
1585598ecfbaSChristoph Hellwig while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
1586598ecfbaSChristoph Hellwig io_list))) {
1587598ecfbaSChristoph Hellwig if (!iomap_ioend_can_merge(ioend, next))
1588598ecfbaSChristoph Hellwig break;
1589598ecfbaSChristoph Hellwig list_move_tail(&next->io_list, &ioend->io_list);
1590598ecfbaSChristoph Hellwig ioend->io_size += next->io_size;
1591598ecfbaSChristoph Hellwig }
1592598ecfbaSChristoph Hellwig }
1593598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
1594598ecfbaSChristoph Hellwig
1595598ecfbaSChristoph Hellwig static int
15964f0f586bSSami Tolvanen iomap_ioend_compare(void *priv, const struct list_head *a,
15974f0f586bSSami Tolvanen const struct list_head *b)
1598598ecfbaSChristoph Hellwig {
1599b3d423ecSChristoph Hellwig struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
1600b3d423ecSChristoph Hellwig struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
1601598ecfbaSChristoph Hellwig
1602598ecfbaSChristoph Hellwig if (ia->io_offset < ib->io_offset)
1603598ecfbaSChristoph Hellwig return -1;
1604b3d423ecSChristoph Hellwig if (ia->io_offset > ib->io_offset)
1605598ecfbaSChristoph Hellwig return 1;
1606598ecfbaSChristoph Hellwig return 0;
1607598ecfbaSChristoph Hellwig }
1608598ecfbaSChristoph Hellwig
1609598ecfbaSChristoph Hellwig void
1610598ecfbaSChristoph Hellwig iomap_sort_ioends(struct list_head *ioend_list)
1611598ecfbaSChristoph Hellwig {
1612598ecfbaSChristoph Hellwig list_sort(NULL, ioend_list, iomap_ioend_compare);
1613598ecfbaSChristoph Hellwig }
1614598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_sort_ioends);
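/*
 * Editorial sketch, not part of this file: a filesystem's I/O completion
 * worker can sort and merge queued ioends to batch per-extent work before
 * finishing them; compare xfs_end_io(). The "myfs" names are hypothetical:
 *
 *	static void myfs_end_io(struct list_head *ioends)
 *	{
 *		struct iomap_ioend *ioend;
 *
 *		iomap_sort_ioends(ioends);
 *		while ((ioend = list_first_entry_or_null(ioends,
 *				struct iomap_ioend, io_list))) {
 *			list_del_init(&ioend->io_list);
 *			iomap_ioend_try_merge(ioend, ioends);
 *			myfs_convert_unwritten(ioend);
 *			iomap_finish_ioends(ioend,
 *				blk_status_to_errno(ioend->io_bio->bi_status));
 *		}
 *	}
 */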
1615598ecfbaSChristoph Hellwig
1616598ecfbaSChristoph Hellwig static void iomap_writepage_end_bio(struct bio *bio)
1617598ecfbaSChristoph Hellwig {
1618598ecfbaSChristoph Hellwig struct iomap_ioend *ioend = bio->bi_private;
1619598ecfbaSChristoph Hellwig
1620598ecfbaSChristoph Hellwig iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
1621598ecfbaSChristoph Hellwig }
1622598ecfbaSChristoph Hellwig
1623598ecfbaSChristoph Hellwig /*
1624598ecfbaSChristoph Hellwig * Submit the final bio for an ioend.
1625598ecfbaSChristoph Hellwig *
1626598ecfbaSChristoph Hellwig * If @error is non-zero, it means that some part of the submission process
1627f1f264b4SAndreas Gruenbacher * has failed after we've marked pages for writeback
1628598ecfbaSChristoph Hellwig * and unlocked them. In this situation, we need to fail the bio instead of
1629598ecfbaSChristoph Hellwig * submitting it. This typically only happens on a filesystem shutdown.
1630598ecfbaSChristoph Hellwig */
1631598ecfbaSChristoph Hellwig static int
1632598ecfbaSChristoph Hellwig iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
1633598ecfbaSChristoph Hellwig int error)
1634598ecfbaSChristoph Hellwig {
1635598ecfbaSChristoph Hellwig ioend->io_bio->bi_private = ioend;
1636598ecfbaSChristoph Hellwig ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
1637598ecfbaSChristoph Hellwig
1638598ecfbaSChristoph Hellwig if (wpc->ops->prepare_ioend)
1639598ecfbaSChristoph Hellwig error = wpc->ops->prepare_ioend(ioend, error);
1640598ecfbaSChristoph Hellwig if (error) {
1641598ecfbaSChristoph Hellwig /*
1642f1f264b4SAndreas Gruenbacher * If we're failing the IO now, just mark the ioend with an
1643598ecfbaSChristoph Hellwig * error and finish it. This will run IO completion immediately
1644598ecfbaSChristoph Hellwig * as there is only one reference to the ioend at this point in
1645598ecfbaSChristoph Hellwig * time.
1646598ecfbaSChristoph Hellwig */
1647598ecfbaSChristoph Hellwig ioend->io_bio->bi_status = errno_to_blk_status(error);
1648598ecfbaSChristoph Hellwig bio_endio(ioend->io_bio);
1649598ecfbaSChristoph Hellwig return error;
1650598ecfbaSChristoph Hellwig }
1651598ecfbaSChristoph Hellwig
1652598ecfbaSChristoph Hellwig submit_bio(ioend->io_bio);
1653598ecfbaSChristoph Hellwig return 0;
1654598ecfbaSChristoph Hellwig }
1655598ecfbaSChristoph Hellwig
1656598ecfbaSChristoph Hellwig static struct iomap_ioend *
1657598ecfbaSChristoph Hellwig iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
1658598ecfbaSChristoph Hellwig loff_t offset, sector_t sector, struct writeback_control *wbc)
1659598ecfbaSChristoph Hellwig {
1660598ecfbaSChristoph Hellwig struct iomap_ioend *ioend;
1661598ecfbaSChristoph Hellwig struct bio *bio;
1662598ecfbaSChristoph Hellwig
1663609be106SChristoph Hellwig bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
1664609be106SChristoph Hellwig REQ_OP_WRITE | wbc_to_write_flags(wbc),
1665609be106SChristoph Hellwig GFP_NOFS, &iomap_ioend_bioset);
1666598ecfbaSChristoph Hellwig bio->bi_iter.bi_sector = sector;
1667598ecfbaSChristoph Hellwig wbc_init_bio(wbc, bio);
1668598ecfbaSChristoph Hellwig
1669598ecfbaSChristoph Hellwig ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
1670598ecfbaSChristoph Hellwig INIT_LIST_HEAD(&ioend->io_list);
1671598ecfbaSChristoph Hellwig ioend->io_type = wpc->iomap.type;
1672598ecfbaSChristoph Hellwig ioend->io_flags = wpc->iomap.flags;
1673598ecfbaSChristoph Hellwig ioend->io_inode = inode;
1674598ecfbaSChristoph Hellwig ioend->io_size = 0;
1675ebb7fb15SDave Chinner ioend->io_folios = 0;
1676598ecfbaSChristoph Hellwig ioend->io_offset = offset;
1677598ecfbaSChristoph Hellwig ioend->io_bio = bio;
1678ebb7fb15SDave Chinner ioend->io_sector = sector;
1679598ecfbaSChristoph Hellwig return ioend;
1680598ecfbaSChristoph Hellwig }
1681598ecfbaSChristoph Hellwig
1682598ecfbaSChristoph Hellwig /*
1683598ecfbaSChristoph Hellwig * Allocate a new bio, and chain the old bio to the new one.
1684598ecfbaSChristoph Hellwig *
1685f1f264b4SAndreas Gruenbacher * Note that we have to perform the chaining in this unintuitive order
1686598ecfbaSChristoph Hellwig * so that the bi_private linkage is set up in the right direction for the
1687598ecfbaSChristoph Hellwig * traversal in iomap_finish_ioend().
1688598ecfbaSChristoph Hellwig */
1689598ecfbaSChristoph Hellwig static struct bio *
1690598ecfbaSChristoph Hellwig iomap_chain_bio(struct bio *prev)
1691598ecfbaSChristoph Hellwig {
1692598ecfbaSChristoph Hellwig struct bio *new;
1693598ecfbaSChristoph Hellwig
169407888c66SChristoph Hellwig new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
169507888c66SChristoph Hellwig bio_clone_blkg_association(new, prev);
1696598ecfbaSChristoph Hellwig new->bi_iter.bi_sector = bio_end_sector(prev);
1697598ecfbaSChristoph Hellwig
1698598ecfbaSChristoph Hellwig bio_chain(prev, new);
1699598ecfbaSChristoph Hellwig bio_get(prev); /* for iomap_finish_ioend */
1700598ecfbaSChristoph Hellwig submit_bio(prev);
1701598ecfbaSChristoph Hellwig return new;
1702598ecfbaSChristoph Hellwig }
1703598ecfbaSChristoph Hellwig
1704598ecfbaSChristoph Hellwig static bool
1705598ecfbaSChristoph Hellwig iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
1706598ecfbaSChristoph Hellwig sector_t sector)
1707598ecfbaSChristoph Hellwig {
1708598ecfbaSChristoph Hellwig if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
1709598ecfbaSChristoph Hellwig (wpc->ioend->io_flags & IOMAP_F_SHARED))
1710598ecfbaSChristoph Hellwig return false;
1711598ecfbaSChristoph Hellwig if (wpc->iomap.type != wpc->ioend->io_type)
1712598ecfbaSChristoph Hellwig return false;
1713598ecfbaSChristoph Hellwig if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
1714598ecfbaSChristoph Hellwig return false;
1715598ecfbaSChristoph Hellwig if (sector != bio_end_sector(wpc->ioend->io_bio))
1716598ecfbaSChristoph Hellwig return false;
1717ebb7fb15SDave Chinner /*
1718ebb7fb15SDave Chinner * Limit ioend bio chain lengths to minimise IO completion latency. This
1719ebb7fb15SDave Chinner * also prevents long tight loops ending page writeback on all the
1720ebb7fb15SDave Chinner * folios in the ioend.
1721ebb7fb15SDave Chinner */
1722ebb7fb15SDave Chinner if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
1723ebb7fb15SDave Chinner return false;
1724598ecfbaSChristoph Hellwig return true;
1725598ecfbaSChristoph Hellwig }
1726598ecfbaSChristoph Hellwig
1727598ecfbaSChristoph Hellwig /*
1728598ecfbaSChristoph Hellwig * Test to see if we have an existing ioend structure that we could append to
1729f1f264b4SAndreas Gruenbacher * first; otherwise finish off the current ioend and start another.
1730598ecfbaSChristoph Hellwig */
1731598ecfbaSChristoph Hellwig static void
1732e735c007SMatthew Wilcox (Oracle) iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
173304f52c4eSRitesh Harjani (IBM) struct iomap_folio_state *ifs, struct iomap_writepage_ctx *wpc,
1734598ecfbaSChristoph Hellwig struct writeback_control *wbc, struct list_head *iolist)
1735598ecfbaSChristoph Hellwig {
1736e735c007SMatthew Wilcox (Oracle) sector_t sector = iomap_sector(&wpc->iomap, pos);
1737598ecfbaSChristoph Hellwig unsigned len = i_blocksize(inode);
1738e735c007SMatthew Wilcox (Oracle) size_t poff = offset_in_folio(folio, pos);
1739598ecfbaSChristoph Hellwig
1740e735c007SMatthew Wilcox (Oracle) if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
1741598ecfbaSChristoph Hellwig if (wpc->ioend)
1742598ecfbaSChristoph Hellwig list_add(&wpc->ioend->io_list, iolist);
1743e735c007SMatthew Wilcox (Oracle) wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
1744598ecfbaSChristoph Hellwig }
1745598ecfbaSChristoph Hellwig
1746e735c007SMatthew Wilcox (Oracle) if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
1747c1b79f11SChristoph Hellwig wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
1748c2478469SJohannes Thumshirn bio_add_folio_nofail(wpc->ioend->io_bio, folio, len, poff);
1749c1b79f11SChristoph Hellwig }
1750c1b79f11SChristoph Hellwig
175104f52c4eSRitesh Harjani (IBM) if (ifs)
175204f52c4eSRitesh Harjani (IBM) atomic_add(len, &ifs->write_bytes_pending);
1753598ecfbaSChristoph Hellwig wpc->ioend->io_size += len;
1754e735c007SMatthew Wilcox (Oracle) wbc_account_cgroup_owner(wbc, &folio->page, len);
1755598ecfbaSChristoph Hellwig }
1756598ecfbaSChristoph Hellwig
1757598ecfbaSChristoph Hellwig /*
1758598ecfbaSChristoph Hellwig * We implement an immediate ioend submission policy here to avoid needing to
1759598ecfbaSChristoph Hellwig * chain multiple ioends and hence nest mempool allocations which can violate
1760f1f264b4SAndreas Gruenbacher * the forward progress guarantees we need to provide. The current ioend we're
1761f1f264b4SAndreas Gruenbacher * adding blocks to is cached in the writepage context, and if the new block
1762f1f264b4SAndreas Gruenbacher * doesn't append to the cached ioend, we create a new ioend and cache that
1763598ecfbaSChristoph Hellwig * instead.
1764598ecfbaSChristoph Hellwig *
1765598ecfbaSChristoph Hellwig * If a new ioend is created and cached, the old ioend is returned and queued
1766598ecfbaSChristoph Hellwig * locally for submission once the entire page is processed or an error has been
1767598ecfbaSChristoph Hellwig * detected. While ioends are submitted immediately after they are completed,
1768598ecfbaSChristoph Hellwig * batching optimisations are provided by higher level block plugging.
1769598ecfbaSChristoph Hellwig *
1770598ecfbaSChristoph Hellwig * At the end of a writeback pass, there will be a cached ioend remaining on the
1771598ecfbaSChristoph Hellwig * writepage context that the caller will need to submit.
1772598ecfbaSChristoph Hellwig */
1773598ecfbaSChristoph Hellwig static int
1774598ecfbaSChristoph Hellwig iomap_writepage_map(struct iomap_writepage_ctx *wpc,
1775598ecfbaSChristoph Hellwig struct writeback_control *wbc, struct inode *inode,
1776e735c007SMatthew Wilcox (Oracle) struct folio *folio, u64 end_pos)
1777598ecfbaSChristoph Hellwig {
17784ce02c67SRitesh Harjani (IBM) struct iomap_folio_state *ifs = folio->private;
1779598ecfbaSChristoph Hellwig struct iomap_ioend *ioend, *next;
1780598ecfbaSChristoph Hellwig unsigned len = i_blocksize(inode);
178192655036SMatthew Wilcox (Oracle) unsigned nblocks = i_blocks_per_folio(inode, folio);
178292655036SMatthew Wilcox (Oracle) u64 pos = folio_pos(folio);
1783598ecfbaSChristoph Hellwig int error = 0, count = 0, i;
1784598ecfbaSChristoph Hellwig LIST_HEAD(submit_list);
1785598ecfbaSChristoph Hellwig
17864ce02c67SRitesh Harjani (IBM) WARN_ON_ONCE(end_pos <= pos);
17874ce02c67SRitesh Harjani (IBM)
17884ce02c67SRitesh Harjani (IBM) if (!ifs && nblocks > 1) {
17894ce02c67SRitesh Harjani (IBM) ifs = ifs_alloc(inode, folio, 0);
17904ce02c67SRitesh Harjani (IBM) iomap_set_range_dirty(folio, 0, end_pos - pos);
17914ce02c67SRitesh Harjani (IBM) }
17924ce02c67SRitesh Harjani (IBM)
179304f52c4eSRitesh Harjani (IBM) WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) != 0);
1794598ecfbaSChristoph Hellwig
1795598ecfbaSChristoph Hellwig /*
179692655036SMatthew Wilcox (Oracle) * Walk through the folio to find areas to write back. If we
179792655036SMatthew Wilcox (Oracle) * run off the end of the current map or find the current map
179892655036SMatthew Wilcox (Oracle) * invalid, grab a new one.
1799598ecfbaSChristoph Hellwig */
180092655036SMatthew Wilcox (Oracle) for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
18014ce02c67SRitesh Harjani (IBM) if (ifs && !ifs_block_is_dirty(folio, ifs, i))
1802598ecfbaSChristoph Hellwig continue;
1803598ecfbaSChristoph Hellwig
180492655036SMatthew Wilcox (Oracle) error = wpc->ops->map_blocks(wpc, inode, pos);
1805598ecfbaSChristoph Hellwig if (error)
1806598ecfbaSChristoph Hellwig break;
1807adc9c2e5SDarrick J. Wong trace_iomap_writepage_map(inode, &wpc->iomap);
18083e19e6f3SChristoph Hellwig if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
18093e19e6f3SChristoph Hellwig continue;
1810598ecfbaSChristoph Hellwig if (wpc->iomap.type == IOMAP_HOLE)
1811598ecfbaSChristoph Hellwig continue;
181204f52c4eSRitesh Harjani (IBM) iomap_add_to_ioend(inode, pos, folio, ifs, wpc, wbc,
1813598ecfbaSChristoph Hellwig &submit_list);
1814598ecfbaSChristoph Hellwig count++;
1815598ecfbaSChristoph Hellwig }
1816ebb7fb15SDave Chinner if (count)
1817ebb7fb15SDave Chinner wpc->ioend->io_folios++;
1818598ecfbaSChristoph Hellwig
1819598ecfbaSChristoph Hellwig WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
1820e735c007SMatthew Wilcox (Oracle) WARN_ON_ONCE(!folio_test_locked(folio));
1821e735c007SMatthew Wilcox (Oracle) WARN_ON_ONCE(folio_test_writeback(folio));
1822e735c007SMatthew Wilcox (Oracle) WARN_ON_ONCE(folio_test_dirty(folio));
1823598ecfbaSChristoph Hellwig
1824598ecfbaSChristoph Hellwig /*
1825598ecfbaSChristoph Hellwig * We cannot cancel the ioend directly here on error. We may have
1826598ecfbaSChristoph Hellwig * already set other pages under writeback and hence we have to run I/O
1827598ecfbaSChristoph Hellwig * completion to mark their error state appropriately.
1829598ecfbaSChristoph Hellwig */
1830598ecfbaSChristoph Hellwig if (unlikely(error)) {
1831598ecfbaSChristoph Hellwig /*
1832763e4cdcSBrian Foster * Let the filesystem know what portion of the current page
18330ab2a85cSChristoph Hellwig * failed to map.
1834598ecfbaSChristoph Hellwig */
18356e478521SMatthew Wilcox (Oracle) if (wpc->ops->discard_folio)
183692655036SMatthew Wilcox (Oracle) wpc->ops->discard_folio(folio, pos);
1837598ecfbaSChristoph Hellwig }
1838598ecfbaSChristoph Hellwig
18394ce02c67SRitesh Harjani (IBM) /*
18404ce02c67SRitesh Harjani (IBM) * We can have dirty bits set past end of file in page_mkwrite path
18414ce02c67SRitesh Harjani (IBM) * while mapping the last partial folio. Hence it's better to clear
18424ce02c67SRitesh Harjani (IBM) * all the dirty bits in the folio here.
18434ce02c67SRitesh Harjani (IBM) */
18444ce02c67SRitesh Harjani (IBM) iomap_clear_range_dirty(folio, 0, folio_size(folio));
18450ab2a85cSChristoph Hellwig
18460ab2a85cSChristoph Hellwig /*
18470ab2a85cSChristoph Hellwig * If the page hasn't been added to the ioend, it won't be affected by
18480ab2a85cSChristoph Hellwig * I/O completion and we must unlock it now.
18490ab2a85cSChristoph Hellwig */
18500ab2a85cSChristoph Hellwig if (error && !count) {
18510ab2a85cSChristoph Hellwig folio_unlock(folio);
18520ab2a85cSChristoph Hellwig goto done;
18530ab2a85cSChristoph Hellwig }
18540ab2a85cSChristoph Hellwig
1855e735c007SMatthew Wilcox (Oracle) folio_start_writeback(folio);
1856e735c007SMatthew Wilcox (Oracle) folio_unlock(folio);

	/*
	 * Preserve the original error if there was one; catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = iomap_submit_ioend(wpc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we
	 * race with a partial folio truncate on a filesystem whose block
	 * size is smaller than the folio size.
	 */
	if (!count)
		folio_end_writeback(folio);
done:
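	/*
	 * Record the error (if any) in the address_space so that a
	 * subsequent fsync() or msync() can observe and report it;
	 * mapping_set_error() is a no-op for a zero error.
	 */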
	mapping_set_error(inode->i_mapping, error);
	return error;
}

/*
 * Write out a dirty folio; called by write_cache_pages() for each folio
 * selected for writeback.
 *
 * For delalloc space on the folio, we need to allocate space and flush it.
 * For unwritten space on the folio, we need to start the conversion to
 * regular allocated space.
 */
static int iomap_do_writepage(struct folio *folio,
		struct writeback_control *wbc, void *data)
{
	struct iomap_writepage_ctx *wpc = data;
	struct inode *inode = folio->mapping->host;
	u64 end_pos, isize;

	trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));

	/*
	 * Refuse to write the folio out if we're called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply nested stacks
	 * in random callers for direct reclaim or memcg reclaim.  We
	 * explicitly allow reclaim from kswapd as the stack usage there is
	 * relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Is this folio beyond the end of the file?
	 *
	 * If the folio index is less than end_index, adjust end_pos to the
	 * highest offset that this folio should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |	  desired writeback range      | see else      |
	 * ---------------------------------^------------------|
	 */
	isize = i_size_read(inode);
	end_pos = folio_pos(folio) + folio_size(folio);
	if (end_pos > isize) {
		/*
		 * Check whether the folio to write out is beyond or
		 * straddles i_size.
		 * -------------------------------------------------------
		 * |		file mapping		    | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N  | Beyond |
		 * ^--------------------------------^----------|---------
		 * |				    | Straddles |
		 * ---------------------------------^----------|--------|
		 */
		size_t poff = offset_in_folio(folio, isize);
		pgoff_t end_index = isize >> PAGE_SHIFT;

		/*
		 * Skip the folio if it's fully outside i_size, e.g. due to a
		 * truncate operation that's in progress.  We've cleaned this
		 * folio and truncate will finish things off for us.
		 *
		 * Note that end_index is an unsigned long.  If the given
		 * offset is greater than 16TB on a 32-bit system, then if we
		 * checked whether the folio is fully outside i_size with
		 * "if (folio->index >= end_index + 1)", "end_index + 1" would
		 * overflow and evaluate to 0.  Hence this folio would be
		 * redirtied and written out repeatedly, which would result in
		 * an infinite loop; the user program performing this operation
		 * would hang.  Instead, we can detect this situation by
		 * checking whether the folio is totally beyond i_size or
		 * whether its offset is just equal to the EOF.
		 */
		if (folio->index > end_index ||
		    (folio->index == end_index && poff == 0))
			goto unlock;

		/*
		 * The folio straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		folio_zero_segment(folio, poff, folio_size(folio));
		end_pos = isize;
	}

	return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);

redirty:
	folio_redirty_for_writepage(wbc, folio);
unlock:
	folio_unlock(folio);
	return 0;
}
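/*
 * Write back the dirty folios of @mapping selected by @wbc.  @wpc is a
 * caller-supplied write context (typically embedded in a filesystem-private
 * structure and zero-initialized on the stack) that carries the current
 * mapping and open ioend from folio to folio, and @ops supplies the
 * filesystem hooks for mapping folio ranges to disk.  Any ioend left open
 * after write_cache_pages() returns is submitted here.
 */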
int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int			ret;

	wpc->ops = ops;
	ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepages);
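
/*
 * Usage sketch (illustrative only; the "myfs" names are hypothetical): a
 * filesystem typically embeds the write context in a private structure and
 * calls iomap_writepages() from its ->writepages method:
 *
 *	struct myfs_writepage_ctx {
 *		struct iomap_writepage_ctx ctx;
 *		unsigned int		   data_seq;	// fs-private state
 *	};
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct myfs_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc.ctx,
 *				&myfs_writeback_ops);
 *	}
 *
 * The callbacks in myfs_writeback_ops can then recover the private context
 * from the struct iomap_writepage_ctx pointer with container_of().
 */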
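/*
 * The ioend bioset backs the bio embedded at the tail of each struct
 * iomap_ioend (io_inline_bio): the front padding makes every bio allocated
 * from it part of an ioend, so writeback gets the ioend and its first bio
 * in a single allocation.  The mempool reserves 4 * (PAGE_SIZE /
 * SECTOR_SIZE) bios to guarantee forward progress under memory pressure.
 */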
static int __init iomap_init(void)
{
	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
			   offsetof(struct iomap_ioend, io_inline_bio),
			   BIOSET_NEED_BVECS);
}
fs_initcall(iomap_init);