// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

#define IOEND_BATCH_SIZE	4096

typedef int (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length);
/*
 * Structure allocated for each folio to track per-block uptodate, dirty state
 * and I/O completions.
 */
struct iomap_folio_state {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		state_lock;

	/*
	 * Each block has two bits in this bitmap:
	 * Bits [0..blocks_per_folio) hold the uptodate status.
	 * Bits [blocks_per_folio..(2 * blocks_per_folio)) hold the dirty status.
	 */
	unsigned long		state[];
};
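
/*
 * Illustrative example (not part of the original source): on a 16K folio
 * backing a filesystem with 4K blocks, blocks_per_folio is 4, so bits 0-3
 * of ifs->state track per-block uptodate state and bits 4-7 track per-block
 * dirty state.  Marking the second block (block 1) dirty is then equivalent
 * to:
 *
 *	set_bit(1 + 4, ifs->state);	// block index + blocks_per_folio
 *
 * which is what ifs_set_range_dirty() below does under state_lock.
 */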

static struct bio_set iomap_ioend_bioset;

static inline bool ifs_is_fully_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs)
{
	struct inode *inode = folio->mapping->host;

	return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
}

static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
		unsigned int block)
{
	return test_bit(block, ifs->state);
}

static void ifs_set_range_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int first_blk = off >> inode->i_blkbits;
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_set(ifs->state, first_blk, nr_blks);
	if (ifs_is_fully_uptodate(folio, ifs))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_set_range_uptodate(struct folio *folio, size_t off,
		size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_set_range_uptodate(folio, ifs, off, len);
	else
		folio_mark_uptodate(folio);
}

static inline bool ifs_block_is_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, int block)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);

	return test_bit(block + blks_per_folio, ifs->state);
}

static void ifs_clear_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_clear_range_dirty(folio, ifs, off, len);
}

static void ifs_set_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_set_range_dirty(folio, ifs, off, len);
}

static struct iomap_folio_state *ifs_alloc(struct inode *inode,
		struct folio *folio, unsigned int flags)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
	gfp_t gfp;

	if (ifs || nr_blocks <= 1)
		return ifs;

	if (flags & IOMAP_NOWAIT)
		gfp = GFP_NOWAIT;
	else
		gfp = GFP_NOFS | __GFP_NOFAIL;

	/*
	 * ifs->state tracks two sets of state flags when the
	 * filesystem block size is smaller than the folio size.
	 * The first state tracks per-block uptodate and the
	 * second tracks per-block dirty state.
	 */
	ifs = kzalloc(struct_size(ifs, state,
		      BITS_TO_LONGS(2 * nr_blocks)), gfp);
	if (!ifs)
		return ifs;

	spin_lock_init(&ifs->state_lock);
	if (folio_test_uptodate(folio))
		bitmap_set(ifs->state, 0, nr_blocks);
	if (folio_test_dirty(folio))
		bitmap_set(ifs->state, nr_blocks, nr_blocks);
	folio_attach_private(folio, ifs);

	return ifs;
}

static void ifs_free(struct folio *folio)
{
	struct iomap_folio_state *ifs = folio_detach_private(folio);

	if (!ifs)
		return;
	WARN_ON_ONCE(atomic_read(&ifs->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
	WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
			folio_test_uptodate(folio));
	kfree(ifs);
}

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_folio_state *ifs = folio->private;
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	size_t orig_plen = plen;
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (ifs) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!ifs_block_is_uptodate(ifs, i))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (ifs_block_is_uptodate(ifs, i)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + orig_plen > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}
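
/*
 * Worked example (illustrative, not from the original source): with 4K
 * blocks in a 16K folio where only block 0 is already uptodate, a read of
 * the whole folio (*pos at the folio start, length 16K) is trimmed by the
 * leading-block loop above: *pos and poff advance by 4K, plen shrinks to
 * 12K, and only the three non-uptodate blocks are actually read.
 */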

static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct iomap_folio_state *ifs = folio->private;

	if (unlikely(error)) {
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		iomap_set_range_uptodate(folio, offset, len);
	}

	if (!ifs || atomic_sub_and_test(len, &ifs->read_bytes_pending))
		folio_unlock(folio);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	size_t offset = offset_in_folio(folio, iomap->offset);
	void *addr;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			 offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		ifs_alloc(iter->inode, folio, iter->flags);

	addr = kmap_local_folio(folio, offset);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(folio, offset, PAGE_SIZE - poff);
	return 0;
}

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct folio *folio = ctx->cur_folio;
	struct iomap_folio_state *ifs;
	loff_t orig_pos = pos;
	size_t poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, folio);

	/* zero post-eof blocks as the page may be mapped */
	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (ifs)
		atomic_add(plen, &ifs->read_bytes_pending);

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
				     REQ_OP_READ, gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!ctx->bio) {
			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
					     orig_gfp);
		}
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio_nofail(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		folio_set_error(folio);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_folio, we always
	 * return 0 and just set the folio error flag on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_read_folio);
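
/*
 * Example usage (illustrative sketch; the "myfs" names are hypothetical): a
 * filesystem typically exposes this helper through its
 * address_space_operations:
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return iomap_read_folio(folio, &myfs_iomap_ops);
 *	}
 */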

static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);
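
/*
 * Example usage (illustrative sketch; the "myfs" names are hypothetical):
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		iomap_readahead(rac, &myfs_iomap_ops);
 *	}
 */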

/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	unsigned first, last, i;

	if (!ifs)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	for (i = first; i <= last; i++)
		if (!ifs_block_is_uptodate(ifs, i))
			return false;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

/**
 * iomap_get_folio - get a folio reference for writing
 * @iter: iteration structure
 * @pos: start offset of write
 * @len: Suggested size of folio to create.
 *
 * Returns a locked reference to the folio at @pos, or an error pointer if the
 * folio could not be obtained.
 */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
{
	fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;

	if (iter->flags & IOMAP_NOWAIT)
		fgp |= FGP_NOWAIT;
	fgp |= fgf_set_order(len);

	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
}
EXPORT_SYMBOL_GPL(iomap_get_folio);

bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * If the folio is dirty, we refuse to release our metadata because
	 * it may be partially dirty.  Once we track per-block dirty state,
	 * we can release the metadata if every block is dirty.
	 */
	if (folio_test_dirty(folio))
		return false;
	ifs_free(folio);
	return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
					folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		ifs_free(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	size_t len = folio_size(folio);

	ifs_alloc(inode, folio, 0);
	iomap_set_range_dirty(folio, 0, len);
	return filemap_dirty_folio(mapping, folio);
}
EXPORT_SYMBOL_GPL(iomap_dirty_folio);
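
/*
 * Example usage (illustrative; the "myfs" names are hypothetical): the
 * exported helpers above are normally wired straight into a filesystem's
 * address_space_operations, e.g.:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.read_folio		= myfs_read_folio,
 *		.readahead		= myfs_readahead,
 *		.is_partially_uptodate	= iomap_is_partially_uptodate,
 *		.release_folio		= iomap_release_folio,
 *		.invalidate_folio	= iomap_invalidate_folio,
 *		.dirty_folio		= iomap_dirty_folio,
 *	};
 */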

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
					 pos + len - 1);
}

static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_add_folio_nofail(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}

static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_folio_state *ifs;
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	/*
	 * If the write or zeroing completely overlaps the current folio, then
	 * the entire folio will be dirtied so there is no need for
	 * per-block state tracking structures to be attached to this folio.
	 * For the unshare case, we must read in the ondisk contents because we
	 * are not changing pagecache contents.
	 */
	if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
	    pos + len >= folio_pos(folio) + folio_size(folio))
		return 0;

	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
		return -EAGAIN;

	if (folio_test_uptodate(folio))
		return 0;
	folio_clear_error(folio);

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status;

			if (iter->flags & IOMAP_NOWAIT)
				return -EAGAIN;

			status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
		size_t len)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->get_folio)
		return folio_ops->get_folio(iter, pos, len);
	else
		return iomap_get_folio(iter, pos, len);
}

static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
		struct folio *folio)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->put_folio) {
		folio_ops->put_folio(iter->inode, pos, ret, folio);
	} else {
		folio_unlock(folio);
		folio_put(folio);
	}
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	folio = __iomap_get_folio(iter, pos, len);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/*
	 * Now we have a locked folio, before we do anything with it we need to
	 * check that the iomap we have cached is not stale. The inode extent
	 * mapping can change due to concurrent IO in flight (e.g.
	 * IOMAP_UNWRITTEN state can change and memory reclaim could have
	 * reclaimed a previously partially written page at this index after IO
	 * completion before this write reaches this file offset) and hence we
	 * could do the wrong thing here (zero a page range incorrectly or fail
	 * to zero) and corrupt data.
	 */
	if (folio_ops && folio_ops->iomap_valid) {
		bool iomap_valid = folio_ops->iomap_valid(iter->inode,
							 &iter->iomap);
		if (!iomap_valid) {
			iter->iomap.flags |= IOMAP_F_STALE;
			status = 0;
			goto out_unlock;
		}
	}

	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	__iomap_put_folio(iter, pos, 0, folio);
	iomap_write_failed(iter->inode, pos, len);

	return status;
}

static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return 0;
	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
	iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
	filemap_dirty_folio(inode->i_mapping, folio);
	return copied;
}

static size_t iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, folio, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, &folio->page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	__iomap_put_folio(iter, pos, ret, folio);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (ret < len)
		iomap_write_failed(iter->inode, pos + ret, len - ret);
	return ret;
}

static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;
	struct address_space *mapping = iter->inode->i_mapping;
	size_t chunk = mapping_max_folio_size(mapping);
	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;

	do {
		struct folio *folio;
		size_t offset;		/* Offset into folio */
		size_t bytes;		/* Bytes to write to folio */
		size_t copied;		/* Bytes copied from user */

		bytes = iov_iter_count(i);
retry:
		offset = pos & (chunk - 1);
		bytes = min(chunk - offset, bytes);
		status = balance_dirty_pages_ratelimited_flags(mapping,
							       bdp_flags);
		if (unlikely(status))
			break;

		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * For async buffered writes the assumption is that the user
		 * page has already been faulted in. This can be optimized by
		 * faulting the user page.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			break;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		status = iomap_write_end(iter, pos, bytes, copied, folio);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (chunk > PAGE_SIZE)
				chunk /= 2;
			if (copied) {
				bytes = copied;
				goto retry;
			}
		} else {
			pos += status;
			written += status;
			length -= status;
		}
	} while (iov_iter_count(i) && length);

	if (status == -EAGAIN) {
		iov_iter_revert(i, written);
		return -EAGAIN;
	}
	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
	};
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iter.flags |= IOMAP_NOWAIT;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);

	if (unlikely(iter.pos == iocb->ki_pos))
		return ret;
	ret = iter.pos - iocb->ki_pos;
	iocb->ki_pos = iter.pos;
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
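
/*
 * Example usage (illustrative sketch; the "myfs" names are hypothetical): a
 * ->write_iter implementation typically takes the inode lock, runs the
 * generic write checks, and then calls this helper:
 *
 *	static ssize_t myfs_buffered_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_iomap_ops);
 *		inode_unlock(inode);
 *		return ret;
 *	}
 */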

static int iomap_write_delalloc_ifs_punch(struct inode *inode,
		struct folio *folio, loff_t start_byte, loff_t end_byte,
		iomap_punch_t punch)
{
	unsigned int first_blk, last_blk, i;
	loff_t last_byte;
	u8 blkbits = inode->i_blkbits;
	struct iomap_folio_state *ifs;
	int ret = 0;

	/*
	 * When we have per-block dirty tracking, there can be
	 * blocks within a folio which are marked uptodate
	 * but not dirty. In that case it is necessary to punch
	 * out such blocks to avoid leaking any delalloc blocks.
	 */
	ifs = folio->private;
	if (!ifs)
		return ret;

	last_byte = min_t(loff_t, end_byte - 1,
			folio_pos(folio) + folio_size(folio) - 1);
	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
	for (i = first_blk; i <= last_blk; i++) {
		if (!ifs_block_is_dirty(folio, ifs, i)) {
			ret = punch(inode, folio_pos(folio) + (i << blkbits),
				    1 << blkbits);
			if (ret)
				return ret;
		}
	}

	return ret;
}


static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		iomap_punch_t punch)
{
	int ret = 0;

	if (!folio_test_dirty(folio))
		return ret;

	/* if dirty, punch up to offset */
	if (start_byte > *punch_start_byte) {
		ret = punch(inode, *punch_start_byte,
				start_byte - *punch_start_byte);
		if (ret)
			return ret;
	}

	/* Punch non-dirty blocks within folio */
	ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte,
			end_byte, punch);
	if (ret)
		return ret;

	/*
	 * Make sure the next punch start is correctly bound to
	 * the end of this data range, not the end of the folio.
	 */
	*punch_start_byte = min_t(loff_t, end_byte,
				folio_pos(folio) + folio_size(folio));

	return ret;
}

/*
 * Scan the data range passed to us for dirty page cache folios. If we find a
 * dirty folio, punch out the preceding range and update the offset from which
 * the next punch will start.
 *
 * We can punch out storage reservations under clean pages because they either
 * contain data that has been written back - in which case the delalloc punch
 * over that range is a no-op - or they have been read faulted in, in which
 * case they contain zeroes and we can remove the delalloc backing range and
 * any new writes to those pages will do the normal hole filling operation...
 *
 * This makes the logic simple: we only need to keep the delalloc extents
 * over the dirty ranges of the page cache.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 */
static int iomap_write_delalloc_scan(struct inode *inode,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		iomap_punch_t punch)
{
	while (start_byte < end_byte) {
		struct folio	*folio;
		int ret;

		/* grab locked page */
		folio = filemap_lock_folio(inode->i_mapping,
				start_byte >> PAGE_SHIFT);
		if (IS_ERR(folio)) {
			start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
					PAGE_SIZE;
			continue;
		}

		ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte,
						 start_byte, end_byte, punch);
		if (ret) {
			folio_unlock(folio);
			folio_put(folio);
			return ret;
		}

		/* move offset to start of next folio in range */
		start_byte = folio_pos(folio) + folio_size(folio);
		folio_unlock(folio);
		folio_put(folio);
	}
	return 0;
}

/*
 * Punch out all the delalloc blocks in the range given except for those that
 * have dirty data still pending in the page cache - those are going to be
 * written and so must still retain the delalloc backing for writeback.
 *
 * As we are scanning the page cache for data, we don't need to reimplement the
 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
 * start and end of data ranges correctly even for sub-folio block sizes. This
 * byte range based iteration is especially convenient because it means we
 * don't have to care about variable size folios, nor where the start or end of
 * the data range lies within a folio, whether they lie within the same folio
 * or even if there are multiple discontiguous data ranges within the folio.
 *
 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
 * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
 * date. A write page fault can then mark it dirty. If we then fail a write()
 * beyond EOF into that up to date cached range, we allocate a delalloc block
 * beyond EOF and then have to punch it out. Because the range is up to date,
 * mapping_seek_hole_data() will return it, and we will skip the punch because
 * the folio is dirty. This is incorrect - we always need to punch out delalloc
 * beyond EOF in this case as writeback will never write back and convert that
 * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
 * resulting in always punching out the range from the EOF to the end of the
 * range the iomap spans.
 *
 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
 * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
 * returns the end of the data range (data_end). Using closed intervals would
 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
 * the code to subtle off-by-one bugs....
 */
static int iomap_write_delalloc_release(struct inode *inode,
		loff_t start_byte, loff_t end_byte, iomap_punch_t punch)
{
	loff_t punch_start_byte = start_byte;
	loff_t scan_end_byte = min(i_size_read(inode), end_byte);
	int error = 0;

	/*
	 * Lock the mapping to avoid races with page faults re-instantiating
	 * folios and dirtying them via ->page_mkwrite whilst we walk the
	 * cache and perform delalloc extent removal. Failing to do this can
	 * leave dirty pages with no space reservation in the cache.
	 */
	filemap_invalidate_lock(inode->i_mapping);
	while (start_byte < scan_end_byte) {
		loff_t		data_end;

		start_byte = mapping_seek_hole_data(inode->i_mapping,
				start_byte, scan_end_byte, SEEK_DATA);
		/*
		 * If there is no more data to scan, all that is left is to
		 * punch out the remaining range.
		 */
		if (start_byte == -ENXIO || start_byte == scan_end_byte)
			break;
		if (start_byte < 0) {
			error = start_byte;
			goto out_unlock;
		}
		WARN_ON_ONCE(start_byte < punch_start_byte);
		WARN_ON_ONCE(start_byte > scan_end_byte);

		/*
		 * We find the end of this contiguous cached data range by
		 * seeking from start_byte to the beginning of the next hole.
		 */
		data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
				scan_end_byte, SEEK_HOLE);
		if (data_end < 0) {
			error = data_end;
			goto out_unlock;
		}

		/*
		 * If we race with post-direct I/O invalidation of the page cache,
		 * there might be no data left at start_byte.
		 */
		if (data_end == start_byte)
			continue;

		WARN_ON_ONCE(data_end < start_byte);
		WARN_ON_ONCE(data_end > scan_end_byte);

		error = iomap_write_delalloc_scan(inode, &punch_start_byte,
				start_byte, data_end, punch);
		if (error)
			goto out_unlock;

		/* The next data search starts at the end of this one. */
		start_byte = data_end;
	}

	if (punch_start_byte < end_byte)
		error = punch(inode, punch_start_byte,
				end_byte - punch_start_byte);
out_unlock:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}

/*
 * When a short write occurs, the filesystem may need to remove reserved space
 * that was allocated in ->iomap_begin from its ->iomap_end method. For
 * filesystems that use delayed allocation, we need to punch out delalloc
 * extents from the range that are not dirty in the page cache. As the write can
 * race with page faults, there can be dirty pages over the delalloc extent
 * outside the range of a short write but still within the delalloc extent
 * allocated for this iomap.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 *
 * The punch() callback *must* only punch delalloc extents in the range passed
 * to it. It must skip over all other types of extents in the range and leave
 * them completely unchanged. It must do this punch atomically with respect to
 * other extent modifications.
 *
 * The punch() callback may be called with a folio locked to prevent writeback
 * extent allocation racing at the edge of the range we are currently punching.
 * The locked folio may or may not cover the range being punched, so it is not
 * safe for the punch() callback to lock folios itself.
 *
 * Lock order is:
 *
 * inode->i_rwsem (shared or exclusive)
 *   inode->i_mapping->invalidate_lock (exclusive)
 *     folio_lock()
 *       ->punch
 *         internal filesystem allocation lock
 */
int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
		struct iomap *iomap, loff_t pos, loff_t length,
		ssize_t written, iomap_punch_t punch)
{
	loff_t			start_byte;
	loff_t			end_byte;
	unsigned int		blocksize = i_blocksize(inode);

	if (iomap->type != IOMAP_DELALLOC)
		return 0;

	/* If we didn't reserve the blocks, we're not allowed to punch them. */
	if (!(iomap->flags & IOMAP_F_NEW))
		return 0;

	/*
	 * start_byte refers to the first unused block after a short write. If
	 * nothing was written, round offset down to point at the first block in
	 * the range.
	 */
	if (unlikely(!written))
		start_byte = round_down(pos, blocksize);
	else
		start_byte = round_up(pos + written, blocksize);
	end_byte = round_up(pos + length, blocksize);

	/* Nothing to do if we've written the entire delalloc extent */
	if (start_byte >= end_byte)
		return 0;

	return iomap_write_delalloc_release(inode, start_byte, end_byte,
					punch);
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
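
/*
 * Example punch callback (illustrative sketch; the "myfs" names are
 * hypothetical).  Per the rules documented above, it must only remove
 * delalloc extents in the given range, atomically, and leave all other
 * extent types untouched:
 *
 *	static int myfs_buffered_write_delalloc_punch(struct inode *inode,
 *			loff_t offset, loff_t length)
 *	{
 *		// Filesystem-private locking and extent manipulation that
 *		// removes only the delalloc extents in this byte range.
 *		return myfs_punch_delalloc_range(inode, offset,
 *						 offset + length);
 *	}
 *
 * A short-write ->iomap_end handler would then pass this callback to
 * iomap_file_buffered_write_punch_delalloc().
 */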

static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	if (!iomap_want_unshare_iter(iter))
		return length;

	do {
		struct folio *folio;
		int status;
		size_t offset;
		size_t bytes = min_t(u64, SIZE_MAX, length);

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			return status;
		if (iomap->flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(bytes == 0))
			return -EIO;

		cond_resched();

		pos += bytes;
		written += bytes;
		length -= bytes;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (length > 0);

	return written;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
	};
	loff_t size = i_size_read(inode);
	int ret;

	if (pos < 0 || pos >= size)
		return 0;

	iter.len = min(len, size - pos);
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);

static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		struct folio *folio;
		int status;
		size_t offset;
		size_t bytes = min_t(u64, SIZE_MAX, length);

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (status)
			return status;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		folio_zero_range(folio, offset, bytes);
		folio_mark_accessed(folio);

		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(bytes == 0))
			return -EIO;

		pos += bytes;
		length -= bytes;
		written += bytes;
	} while (length > 0);

	if (did_zero)
		*did_zero = true;
	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
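
/*
 * Example usage (illustrative; "myfs_iomap_ops" is hypothetical): a truncate
 * path typically zeroes the tail of the new final block before updating
 * i_size, e.g.:
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *				    &myfs_iomap_ops);
 */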

static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
		struct folio *folio)
{
	loff_t length = iomap_length(iter);
	int ret;

	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
					      &iter->iomap);
		if (ret)
			return ret;
		block_commit_write(&folio->page, 0, length);
	} else {
		WARN_ON_ONCE(!folio_test_uptodate(folio));
		folio_mark_dirty(folio);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= file_inode(vmf->vma->vm_file),
		.flags		= IOMAP_WRITE | IOMAP_FAULT,
	};
	struct folio *folio = page_folio(vmf->page);
	ssize_t ret;

	folio_lock(folio);
	ret = folio_mkwrite_check_truncate(folio, iter.inode);
	if (ret < 0)
		goto out_unlock;
	iter.pos = folio_pos(folio);
	iter.len = ret;
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_folio_mkwrite_iter(&iter, folio);

	if (ret < 0)
		goto out_unlock;
	folio_wait_stable(folio);
	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return vmf_fs_error(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
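
/*
 * Example usage (illustrative sketch; the "myfs" names are hypothetical):
 * wired up as the ->page_mkwrite handler in a vm_operations_struct, usually
 * with any filesystem-private fault locking taken around the call:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *	}
 */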

static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
		size_t len, int error)
{
	struct iomap_folio_state *ifs = folio->private;

	if (error) {
		folio_set_error(folio);
		mapping_set_error(inode->i_mapping, error);
	}

	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
	WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);

	if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
		folio_end_writeback(folio);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static u32
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_inline_bio;
	struct bio *last = ioend->io_bio, *next;
	u64 start = bio->bi_iter.bi_sector;
	loff_t offset = ioend->io_offset;
	bool quiet = bio_flagged(bio, BIO_QUIET);
	u32 folio_count = 0;

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct folio_iter fi;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk all folios in bio, ending page IO on them */
		bio_for_each_folio_all(fi, bio) {
			iomap_finish_folio_write(inode, fi.folio, fi.length,
					error);
			folio_count++;
		}
		bio_put(bio);
	}
	/* The ioend has been freed by bio_put() */

	if (unlikely(error && !quiet)) {
		printk_ratelimited(KERN_ERR
"%s: writeback error on inode %lu, offset %lld, sector %llu",
			inode->i_sb->s_id, inode->i_ino, offset, start);
	}
	return folio_count;
}

/*
 * Ioend completion routine for merged bios. This can only be called from task
 * contexts as merged ioends can be of unbounded length. Hence we have to break
 * up the writeback completions into manageable chunks to avoid long scheduler
 * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
 * good batch processing throughput without creating adverse scheduler latency
 * conditions.
 */
1532  void
iomap_finish_ioends(struct iomap_ioend * ioend,int error)1533  iomap_finish_ioends(struct iomap_ioend *ioend, int error)
1534  {
1535  	struct list_head tmp;
1536  	u32 completions;
1537  
1538  	might_sleep();
1539  
1540  	list_replace_init(&ioend->io_list, &tmp);
1541  	completions = iomap_finish_ioend(ioend, error);
1542  
1543  	while (!list_empty(&tmp)) {
1544  		if (completions > IOEND_BATCH_SIZE * 8) {
1545  			cond_resched();
1546  			completions = 0;
1547  		}
1548  		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
1549  		list_del_init(&ioend->io_list);
1550  		completions += iomap_finish_ioend(ioend, error);
1551  	}
1552  }
1553  EXPORT_SYMBOL_GPL(iomap_finish_ioends);
1554  
1555  /*
1556   * We can merge two adjacent ioends if they have the same set of work to do.
1557   */
1558  static bool
1559  iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
1560  {
1561  	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
1562  		return false;
1563  	if ((ioend->io_flags & IOMAP_F_SHARED) ^
1564  	    (next->io_flags & IOMAP_F_SHARED))
1565  		return false;
1566  	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
1567  	    (next->io_type == IOMAP_UNWRITTEN))
1568  		return false;
1569  	if (ioend->io_offset + ioend->io_size != next->io_offset)
1570  		return false;
1571  	/*
1572  	 * Do not merge physically discontiguous ioends. The filesystem
1573  	 * completion functions will have to iterate the physical
1574  	 * discontiguities even if we merge the ioends at a logical level, so
1575  	 * we don't gain anything by merging physical discontiguities here.
1576  	 *
1577  	 * We cannot use bio->bi_iter.bi_sector here as it is modified during
1578  	 * submission so does not point to the start sector of the bio at
1579  	 * completion.
1580  	 */
1581  	if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
1582  		return false;
1583  	return true;
1584  }
1585  
1586  void
1587  iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
1588  {
1589  	struct iomap_ioend *next;
1590  
1591  	INIT_LIST_HEAD(&ioend->io_list);
1592  
1593  	while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
1594  			io_list))) {
1595  		if (!iomap_ioend_can_merge(ioend, next))
1596  			break;
1597  		list_move_tail(&next->io_list, &ioend->io_list);
1598  		ioend->io_size += next->io_size;
1599  	}
1600  }
1601  EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
1602  
1603  static int
1604  iomap_ioend_compare(void *priv, const struct list_head *a,
1605  		const struct list_head *b)
1606  {
1607  	struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
1608  	struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
1609  
1610  	if (ia->io_offset < ib->io_offset)
1611  		return -1;
1612  	if (ia->io_offset > ib->io_offset)
1613  		return 1;
1614  	return 0;
1615  }
1616  
1617  void
1618  iomap_sort_ioends(struct list_head *ioend_list)
1619  {
1620  	list_sort(NULL, ioend_list, iomap_ioend_compare);
1621  }
1622  EXPORT_SYMBOL_GPL(iomap_sort_ioends);
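/*
 * Illustrative sketch (assumed caller, not part of this file): a filesystem
 * completion worker draining a list of finished ioends would typically
 * combine the helpers above.  fs_end_io_work() is a hypothetical name:
 *
 *	static void fs_end_io_work(struct list_head *done)
 *	{
 *		struct iomap_ioend *ioend;
 *
 *		iomap_sort_ioends(done);
 *		while ((ioend = list_first_entry_or_null(done,
 *				struct iomap_ioend, io_list))) {
 *			list_del_init(&ioend->io_list);
 *			iomap_ioend_try_merge(ioend, done);
 *			iomap_finish_ioends(ioend,
 *				blk_status_to_errno(ioend->io_bio->bi_status));
 *		}
 *	}
 */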
1623  
1624  static void iomap_writepage_end_bio(struct bio *bio)
1625  {
1626  	struct iomap_ioend *ioend = bio->bi_private;
1627  
1628  	iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
1629  }
1630  
1631  /*
1632   * Submit the final bio for an ioend.
1633   *
1634   * If @error is non-zero, it means that we have a situation where some part of
1635   * the submission process has failed after we've marked pages for writeback
1636   * and unlocked them.  In this situation, we need to fail the bio instead of
1637   * submitting it.  This typically only happens on a filesystem shutdown.
1638   */
1639  static int
1640  iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
1641  		int error)
1642  {
1643  	ioend->io_bio->bi_private = ioend;
1644  	ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
1645  
1646  	if (wpc->ops->prepare_ioend)
1647  		error = wpc->ops->prepare_ioend(ioend, error);
1648  	if (error) {
1649  		/*
1650  		 * If we're failing the IO now, just mark the ioend with an
1651  		 * error and finish it.  This will run IO completion immediately
1652  		 * as there is only one reference to the ioend at this point in
1653  		 * time.
1654  		 */
1655  		ioend->io_bio->bi_status = errno_to_blk_status(error);
1656  		bio_endio(ioend->io_bio);
1657  		return error;
1658  	}
1659  
1660  	submit_bio(ioend->io_bio);
1661  	return 0;
1662  }
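/*
 * Illustrative sketch: a filesystem that must do transactional work at I/O
 * completion time (e.g. unwritten extent conversion) hooks ->prepare_ioend
 * to redirect bio completion from iomap_writepage_end_bio() to its own
 * handler running in task context.  fs_end_bio is a hypothetical name:
 *
 *	static int fs_prepare_ioend(struct iomap_ioend *ioend, int status)
 *	{
 *		if (!status && ioend->io_type == IOMAP_UNWRITTEN)
 *			ioend->io_bio->bi_end_io = fs_end_bio;
 *		return status;
 *	}
 */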
1663  
1664  static struct iomap_ioend *
1665  iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
1666  		loff_t offset, sector_t sector, struct writeback_control *wbc)
1667  {
1668  	struct iomap_ioend *ioend;
1669  	struct bio *bio;
1670  
1671  	bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
1672  			       REQ_OP_WRITE | wbc_to_write_flags(wbc),
1673  			       GFP_NOFS, &iomap_ioend_bioset);
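	/*
	 * Bios from iomap_ioend_bioset carry front padding (see iomap_init()
	 * below), so the ioend embedding this inline bio is recovered with
	 * container_of() and is freed along with the bio's last reference.
	 */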
1674  	bio->bi_iter.bi_sector = sector;
1675  	wbc_init_bio(wbc, bio);
1676  
1677  	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
1678  	INIT_LIST_HEAD(&ioend->io_list);
1679  	ioend->io_type = wpc->iomap.type;
1680  	ioend->io_flags = wpc->iomap.flags;
1681  	ioend->io_inode = inode;
1682  	ioend->io_size = 0;
1683  	ioend->io_folios = 0;
1684  	ioend->io_offset = offset;
1685  	ioend->io_bio = bio;
1686  	ioend->io_sector = sector;
1687  	return ioend;
1688  }
1689  
1690  /*
1691   * Allocate a new bio, and chain the old bio to the new one.
1692   *
1693   * Note that we have to perform the chaining in this unintuitive order
1694   * so that the bi_private linkage is set up in the right direction for the
1695   * traversal in iomap_finish_ioend().
1696   */
1697  static struct bio *
1698  iomap_chain_bio(struct bio *prev)
1699  {
1700  	struct bio *new;
1701  
1702  	new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
1703  	bio_clone_blkg_association(new, prev);
1704  	new->bi_iter.bi_sector = bio_end_sector(prev);
1705  
1706  	bio_chain(prev, new);
1707  	bio_get(prev);		/* for iomap_finish_ioend */
1708  	submit_bio(prev);
1709  	return new;
1710  }
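/*
 * Resulting linkage for an ioend whose data spans several bios:
 *
 *	io_inline_bio --bi_private--> bio --bi_private--> ... --> io_bio
 *
 * The final bio (ioend->io_bio) has bi_private pointing back at the ioend
 * itself, set up in iomap_submit_ioend(), which is why the traversal in
 * iomap_finish_ioend() terminates explicitly at the last bio.
 */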
1711  
1712  static bool
1713  iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
1714  		sector_t sector)
1715  {
1716  	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
1717  	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
1718  		return false;
1719  	if (wpc->iomap.type != wpc->ioend->io_type)
1720  		return false;
1721  	if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
1722  		return false;
1723  	if (sector != bio_end_sector(wpc->ioend->io_bio))
1724  		return false;
1725  	/*
1726  	 * Limit ioend bio chain lengths to minimise IO completion latency. This
1727  	 * also prevents long tight loops ending page writeback on all the
1728  	 * folios in the ioend.
1729  	 */
1730  	if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
1731  		return false;
1732  	return true;
1733  }
1734  
1735  /*
1736   * Test to see if we have an existing ioend structure that we could append to
1737   * first; otherwise finish off the current ioend and start another.
1738   */
1739  static void
1740  iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
1741  		struct iomap_folio_state *ifs, struct iomap_writepage_ctx *wpc,
1742  		struct writeback_control *wbc, struct list_head *iolist)
1743  {
1744  	sector_t sector = iomap_sector(&wpc->iomap, pos);
1745  	unsigned len = i_blocksize(inode);
1746  	size_t poff = offset_in_folio(folio, pos);
1747  
1748  	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
1749  		if (wpc->ioend)
1750  			list_add(&wpc->ioend->io_list, iolist);
1751  		wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
1752  	}
1753  
1754  	if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
1755  		wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
1756  		bio_add_folio_nofail(wpc->ioend->io_bio, folio, len, poff);
1757  	}
1758  
1759  	if (ifs)
1760  		atomic_add(len, &ifs->write_bytes_pending);
1761  	wpc->ioend->io_size += len;
1762  	wbc_account_cgroup_owner(wbc, &folio->page, len);
1763  }
1764  
1765  /*
1766   * We implement an immediate ioend submission policy here to avoid needing to
1767   * chain multiple ioends and hence nest mempool allocations which can violate
1768   * the forward progress guarantees we need to provide. The current ioend we're
1769   * adding blocks to is cached in the writepage context, and if the new block
1770   * doesn't append to the cached ioend, it will create a new ioend and cache that
1771   * instead.
1772   *
1773   * If a new ioend is created and cached, the old ioend is returned and queued
1774   * locally for submission once the entire page is processed or an error has been
1775   * detected.  While ioends are submitted immediately once they are built,
1776   * batching optimisations are provided by higher level block plugging.
1777   *
1778   * At the end of a writeback pass, there will be a cached ioend remaining on the
1779   * writepage context that the caller will need to submit.
1780   */
1781  static int
1782  iomap_writepage_map(struct iomap_writepage_ctx *wpc,
1783  		struct writeback_control *wbc, struct inode *inode,
1784  		struct folio *folio, u64 end_pos)
1785  {
1786  	struct iomap_folio_state *ifs = folio->private;
1787  	struct iomap_ioend *ioend, *next;
1788  	unsigned len = i_blocksize(inode);
1789  	unsigned nblocks = i_blocks_per_folio(inode, folio);
1790  	u64 pos = folio_pos(folio);
1791  	int error = 0, count = 0, i;
1792  	LIST_HEAD(submit_list);
1793  
1794  	WARN_ON_ONCE(end_pos <= pos);
1795  
1796  	if (!ifs && nblocks > 1) {
1797  		ifs = ifs_alloc(inode, folio, 0);
1798  		iomap_set_range_dirty(folio, 0, end_pos - pos);
1799  	}
1800  
1801  	WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) != 0);
1802  
1803  	/*
1804  	 * Walk through the folio to find areas to write back. If we
1805  	 * run off the end of the current map or find the current map
1806  	 * invalid, grab a new one.
1807  	 */
1808  	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
1809  		if (ifs && !ifs_block_is_dirty(folio, ifs, i))
1810  			continue;
1811  
1812  		error = wpc->ops->map_blocks(wpc, inode, pos);
1813  		if (error)
1814  			break;
1815  		trace_iomap_writepage_map(inode, &wpc->iomap);
1816  		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
1817  			continue;
1818  		if (wpc->iomap.type == IOMAP_HOLE)
1819  			continue;
1820  		iomap_add_to_ioend(inode, pos, folio, ifs, wpc, wbc,
1821  				 &submit_list);
1822  		count++;
1823  	}
1824  	if (count)
1825  		wpc->ioend->io_folios++;
1826  
1827  	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
1828  	WARN_ON_ONCE(!folio_test_locked(folio));
1829  	WARN_ON_ONCE(folio_test_writeback(folio));
1830  	WARN_ON_ONCE(folio_test_dirty(folio));
1831  
1832  	/*
1833  	 * We cannot cancel the ioend directly here on error.  We may have
1834  	 * already set other pages under writeback and hence we have to run I/O
1835  	 * completion to mark the error state of the pages under writeback
1836  	 * appropriately.
1837  	 */
1838  	if (unlikely(error)) {
1839  		/*
1840  		 * Let the filesystem know what portion of the current page
1841  		 * failed to map.
1842  		 */
1843  		if (wpc->ops->discard_folio)
1844  			wpc->ops->discard_folio(folio, pos);
1845  	}
1846  
1847  	/*
1848  	 * We can have dirty bits set past EOF in the page_mkwrite path
1849  	 * while mapping the last partial folio. Hence it's better to clear
1850  	 * all the dirty bits in the folio here.
1851  	 */
1852  	iomap_clear_range_dirty(folio, 0, folio_size(folio));
1853  
1854  	/*
1855  	 * If the page hasn't been added to the ioend, it won't be affected by
1856  	 * I/O completion and we must unlock it now.
1857  	 */
1858  	if (error && !count) {
1859  		folio_unlock(folio);
1860  		goto done;
1861  	}
1862  
1863  	folio_start_writeback(folio);
1864  	folio_unlock(folio);
1865  
1866  	/*
1867  	 * Preserve the original error if there was one; catch
1868  	 * submission errors here and propagate into subsequent ioend
1869  	 * submissions.
1870  	 */
1871  	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
1872  		int error2;
1873  
1874  		list_del_init(&ioend->io_list);
1875  		error2 = iomap_submit_ioend(wpc, ioend, error);
1876  		if (error2 && !error)
1877  			error = error2;
1878  	}
1879  
1880  	/*
1881  	 * We can end up here with no error and nothing to write only if we race
1882  	 * with a partial page truncate on a sub-page block sized filesystem.
1883  	 */
1884  	if (!count)
1885  		folio_end_writeback(folio);
1886  done:
1887  	mapping_set_error(inode->i_mapping, error);
1888  	return error;
1889  }
1890  
1891  /*
1892   * Write out a dirty page.
1893   *
1894   * For delalloc space on the page, we need to allocate space and flush it.
1895   * For unwritten space on the page, we need to start the conversion to
1896   * regular allocated space.
1897   */
1898  static int iomap_do_writepage(struct folio *folio,
1899  		struct writeback_control *wbc, void *data)
1900  {
1901  	struct iomap_writepage_ctx *wpc = data;
1902  	struct inode *inode = folio->mapping->host;
1903  	u64 end_pos, isize;
1904  
1905  	trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
1906  
1907  	/*
1908  	 * Refuse to write the folio out if we're called from reclaim context.
1909  	 *
1910  	 * This avoids stack overflows when called from deep call stacks in
1911  	 * random callers doing direct reclaim or memcg reclaim.  We explicitly
1912  	 * allow reclaim from kswapd as the stack usage there is relatively low.
1913  	 *
1914  	 * This should never happen except in the case of a VM regression so
1915  	 * warn about it.
1916  	 */
1917  	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
1918  			PF_MEMALLOC))
1919  		goto redirty;
1920  
1921  	/*
1922  	 * Is this folio beyond the end of the file?
1923  	 *
1924  	 * If this folio extends past EOF, end_pos is adjusted down to the
1925  	 * highest offset that this folio should actually represent.
1926  	 * -----------------------------------------------------
1927  	 * |			file mapping	       | <EOF> |
1928  	 * -----------------------------------------------------
1929  	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
1930  	 * ^--------------------------------^----------|--------
1931  	 * |     desired writeback range    |      see else    |
1932  	 * ---------------------------------^------------------|
1933  	 */
1934  	isize = i_size_read(inode);
1935  	end_pos = folio_pos(folio) + folio_size(folio);
1936  	if (end_pos > isize) {
1937  		/*
1938  		 * Check whether the page to write out is entirely beyond
1939  		 * i_size or merely straddles it.
1940  		 * -------------------------------------------------------
1941  		 * |		file mapping		        | <EOF>  |
1942  		 * -------------------------------------------------------
1943  		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
1944  		 * ^--------------------------------^-----------|---------
1945  		 * |				    |      Straddles     |
1946  		 * ---------------------------------^-----------|--------|
1947  		 */
1948  		size_t poff = offset_in_folio(folio, isize);
1949  		pgoff_t end_index = isize >> PAGE_SHIFT;
1950  
1951  		/*
1952  		 * Skip the page if it's fully outside i_size, e.g.
1953  		 * due to a truncate operation that's in progress.  We've
1954  		 * cleaned this page and truncate will finish things off for
1955  		 * us.
1956  		 *
1957  		 * Note that the end_index is unsigned long.  If the given
1958  		 * offset is greater than 16TB on a 32-bit system then if we
1959  		 * checked if the page is fully outside i_size with
1960  		 * "if (page->index >= end_index + 1)", "end_index + 1" would
1961  		 * overflow and evaluate to 0.  Hence this page would be
1962  		 * redirtied and written out repeatedly, which would result in
1963  		 * an infinite loop; the user program performing this operation
1964  		 * would hang.  Instead, we can detect this situation by
1965  		 * checking if the page is totally beyond i_size or if its
1966  		 * offset is just equal to the EOF.
1967  		 */
1968  		if (folio->index > end_index ||
1969  		    (folio->index == end_index && poff == 0))
1970  			goto unlock;
1971  
1972  		/*
1973  		 * The page straddles i_size.  It must be zeroed out on each
1974  		 * and every writepage invocation because it may be mmapped.
1975  		 * "A file is mapped in multiples of the page size.  For a file
1976  		 * that is not a multiple of the page size, the remaining
1977  		 * memory is zeroed when mapped, and writes to that region are
1978  		 * not written out to the file."
1979  		 */
1980  		folio_zero_segment(folio, poff, folio_size(folio));
1981  		end_pos = isize;
1982  	}
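	/*
	 * Worked example (assuming 4k pages): with i_size = 10000, end_index
	 * is 2; a single-page folio at index 2 spans file offsets 8192..12287,
	 * so poff = 1808.  The folio is not skipped, bytes 1808..4095 of it
	 * are zeroed, and end_pos is clamped from 12288 down to 10000.
	 */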
1983  
1984  	return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
1985  
1986  redirty:
1987  	folio_redirty_for_writepage(wbc, folio);
1988  unlock:
1989  	folio_unlock(folio);
1990  	return 0;
1991  }
1992  
1993  int
1994  iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
1995  		struct iomap_writepage_ctx *wpc,
1996  		const struct iomap_writeback_ops *ops)
1997  {
1998  	int			ret;
1999  
2000  	wpc->ops = ops;
2001  	ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
2002  	if (!wpc->ioend)
2003  		return ret;
2004  	return iomap_submit_ioend(wpc, wpc->ioend, ret);
2005  }
2006  EXPORT_SYMBOL_GPL(iomap_writepages);
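/*
 * Illustrative sketch: a typical ->writepages implementation wraps
 * iomap_writepages() with an on-stack writepage context.  The names
 * fs_writepages and fs_writeback_ops are hypothetical:
 *
 *	static int fs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc, &fs_writeback_ops);
 *	}
 */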
2007  
2008  static int __init iomap_init(void)
2009  {
2010  	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
2011  			   offsetof(struct iomap_ioend, io_inline_bio),
2012  			   BIOSET_NEED_BVECS);
2013  }
2014  fs_initcall(iomap_init);
2015