xref: /openbmc/linux/fs/iomap/buffered-io.c (revision cae2de6978915991a564e3c5c69b66b629c031af)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Red Hat, Inc.
4  * Copyright (C) 2016-2019 Christoph Hellwig.
5  */
6 #include <linux/module.h>
7 #include <linux/compiler.h>
8 #include <linux/fs.h>
9 #include <linux/iomap.h>
10 #include <linux/pagemap.h>
11 #include <linux/uio.h>
12 #include <linux/buffer_head.h>
13 #include <linux/dax.h>
14 #include <linux/writeback.h>
15 #include <linux/list_sort.h>
16 #include <linux/swap.h>
17 #include <linux/bio.h>
18 #include <linux/sched/signal.h>
19 #include <linux/migrate.h>
20 #include "trace.h"
21 
22 #include "../internal.h"
23 
24 #define IOEND_BATCH_SIZE	4096
25 
26 /*
27  * Structure allocated for each folio when block size < folio size
28  * to track sub-folio uptodate status and I/O completions.
29  */
30 struct iomap_page {
31 	atomic_t		read_bytes_pending;	/* bytes of read I/O in flight */
32 	atomic_t		write_bytes_pending;	/* bytes of writeback in flight */
33 	spinlock_t		uptodate_lock;		/* protects uptodate[] updates */
34 	unsigned long		uptodate[];		/* one bit per block in the folio */
35 };
36 
37 static inline struct iomap_page *to_iomap_page(struct folio *folio)
38 {
39 	if (folio_test_private(folio))
40 		return folio_get_private(folio);
41 	return NULL;
42 }
43 
44 static struct bio_set iomap_ioend_bioset;
45 
46 static struct iomap_page *
47 iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags)
48 {
49 	struct iomap_page *iop = to_iomap_page(folio);
50 	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
51 	gfp_t gfp;
52 
53 	if (iop || nr_blocks <= 1)
54 		return iop;
55 
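	/*
	 * Nowait callers cannot block on memory allocation, so let the
	 * allocation fail and leave it to the caller to cope with a NULL
	 * iop (e.g. __iomap_write_begin() returns -EAGAIN).  Otherwise the
	 * iop is required, so use a NOFS allocation that cannot fail.
	 */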
56 	if (flags & IOMAP_NOWAIT)
57 		gfp = GFP_NOWAIT;
58 	else
59 		gfp = GFP_NOFS | __GFP_NOFAIL;
60 
61 	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
62 		      gfp);
63 	if (iop) {
64 		spin_lock_init(&iop->uptodate_lock);
65 		if (folio_test_uptodate(folio))
66 			bitmap_fill(iop->uptodate, nr_blocks);
67 		folio_attach_private(folio, iop);
68 	}
69 	return iop;
70 }
71 
72 static void iomap_page_release(struct folio *folio)
73 {
74 	struct iomap_page *iop = folio_detach_private(folio);
75 	struct inode *inode = folio->mapping->host;
76 	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
77 
78 	if (!iop)
79 		return;
80 	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
81 	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
82 	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
83 			folio_test_uptodate(folio));
84 	kfree(iop);
85 }
86 
87 /*
88  * Calculate the range inside the folio that we actually need to read.
89  */
90 static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
91 		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
92 {
93 	struct iomap_page *iop = to_iomap_page(folio);
94 	loff_t orig_pos = *pos;
95 	loff_t isize = i_size_read(inode);
96 	unsigned block_bits = inode->i_blkbits;
97 	unsigned block_size = (1 << block_bits);
98 	size_t poff = offset_in_folio(folio, *pos);
99 	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
100 	unsigned first = poff >> block_bits;
101 	unsigned last = (poff + plen - 1) >> block_bits;
102 
103 	/*
104 	 * If the block size is smaller than the folio size, we need to check the
105 	 * per-block uptodate status and adjust the offset and length if needed
106 	 * to avoid reading in already uptodate ranges.
107 	 */
108 	if (iop) {
109 		unsigned int i;
110 
111 		/* move forward for each leading block marked uptodate */
112 		for (i = first; i <= last; i++) {
113 			if (!test_bit(i, iop->uptodate))
114 				break;
115 			*pos += block_size;
116 			poff += block_size;
117 			plen -= block_size;
118 			first++;
119 		}
120 
121 		/* truncate len if we find any trailing uptodate block(s) */
122 		for ( ; i <= last; i++) {
123 			if (test_bit(i, iop->uptodate)) {
124 				plen -= (last - i + 1) * block_size;
125 				last = i - 1;
126 				break;
127 			}
128 		}
129 	}
130 
131 	/*
132 	 * If the extent spans the block that contains the i_size, we need to
133 	 * handle both halves separately so that we properly zero data in the
134 	 * page cache for blocks that are entirely outside of i_size.
135 	 */
136 	if (orig_pos <= isize && orig_pos + length > isize) {
137 		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
138 
139 		if (first <= end && last > end)
140 			plen -= (last - end) * block_size;
141 	}
142 
143 	*offp = poff;
144 	*lenp = plen;
145 }
146 
147 static void iomap_iop_set_range_uptodate(struct folio *folio,
148 		struct iomap_page *iop, size_t off, size_t len)
149 {
150 	struct inode *inode = folio->mapping->host;
151 	unsigned first = off >> inode->i_blkbits;
152 	unsigned last = (off + len - 1) >> inode->i_blkbits;
153 	unsigned long flags;
154 
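	/*
	 * The uptodate bitmap is updated both from task context and from the
	 * read completion handler, which may run in interrupt context, so an
	 * irq-safe spinlock is needed here.
	 */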
155 	spin_lock_irqsave(&iop->uptodate_lock, flags);
156 	bitmap_set(iop->uptodate, first, last - first + 1);
157 	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
158 		folio_mark_uptodate(folio);
159 	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
160 }
161 
162 static void iomap_set_range_uptodate(struct folio *folio,
163 		struct iomap_page *iop, size_t off, size_t len)
164 {
165 	if (folio_test_error(folio))
166 		return;
167 
168 	if (iop)
169 		iomap_iop_set_range_uptodate(folio, iop, off, len);
170 	else
171 		folio_mark_uptodate(folio);
172 }
173 
174 static void iomap_finish_folio_read(struct folio *folio, size_t offset,
175 		size_t len, int error)
176 {
177 	struct iomap_page *iop = to_iomap_page(folio);
178 
179 	if (unlikely(error)) {
180 		folio_clear_uptodate(folio);
181 		folio_set_error(folio);
182 	} else {
183 		iomap_set_range_uptodate(folio, iop, offset, len);
184 	}
185 
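	/*
	 * The folio stays locked until the last outstanding read into it has
	 * completed; without an iop there is only ever a single read.
	 */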
186 	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
187 		folio_unlock(folio);
188 }
189 
190 static void iomap_read_end_io(struct bio *bio)
191 {
192 	int error = blk_status_to_errno(bio->bi_status);
193 	struct folio_iter fi;
194 
195 	bio_for_each_folio_all(fi, bio)
196 		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
197 	bio_put(bio);
198 }
199 
200 struct iomap_readpage_ctx {
201 	struct folio		*cur_folio;
202 	bool			cur_folio_in_bio;
203 	struct bio		*bio;
204 	struct readahead_control *rac;
205 };
206 
207 /**
208  * iomap_read_inline_data - copy inline data into the page cache
209  * @iter: iteration structure
210  * @folio: folio to copy to
211  *
212  * Copy the inline data in @iter into @folio and zero out the rest of the folio.
213  * Only a single IOMAP_INLINE extent is allowed at the end of each file.
214  * Returns zero for success to complete the read, or the usual negative errno.
215  */
216 static int iomap_read_inline_data(const struct iomap_iter *iter,
217 		struct folio *folio)
218 {
219 	struct iomap_page *iop;
220 	const struct iomap *iomap = iomap_iter_srcmap(iter);
221 	size_t size = i_size_read(iter->inode) - iomap->offset;
222 	size_t poff = offset_in_page(iomap->offset);
223 	size_t offset = offset_in_folio(folio, iomap->offset);
224 	void *addr;
225 
226 	if (folio_test_uptodate(folio))
227 		return 0;
228 
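	/* Inline data must not cross a page boundary or exceed the extent. */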
229 	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
230 		return -EIO;
231 	if (WARN_ON_ONCE(size > PAGE_SIZE -
232 			 offset_in_page(iomap->inline_data)))
233 		return -EIO;
234 	if (WARN_ON_ONCE(size > iomap->length))
235 		return -EIO;
236 	if (offset > 0)
237 		iop = iomap_page_create(iter->inode, folio, iter->flags);
238 	else
239 		iop = to_iomap_page(folio);
240 
241 	addr = kmap_local_folio(folio, offset);
242 	memcpy(addr, iomap->inline_data, size);
243 	memset(addr + size, 0, PAGE_SIZE - poff - size);
244 	kunmap_local(addr);
245 	iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
246 	return 0;
247 }
248 
249 static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
250 		loff_t pos)
251 {
252 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
253 
254 	return srcmap->type != IOMAP_MAPPED ||
255 		(srcmap->flags & IOMAP_F_NEW) ||
256 		pos >= i_size_read(iter->inode);
257 }
258 
259 static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
260 		struct iomap_readpage_ctx *ctx, loff_t offset)
261 {
262 	const struct iomap *iomap = &iter->iomap;
263 	loff_t pos = iter->pos + offset;
264 	loff_t length = iomap_length(iter) - offset;
265 	struct folio *folio = ctx->cur_folio;
266 	struct iomap_page *iop;
267 	loff_t orig_pos = pos;
268 	size_t poff, plen;
269 	sector_t sector;
270 
271 	if (iomap->type == IOMAP_INLINE)
272 		return iomap_read_inline_data(iter, folio);
273 
274 	/* zero post-eof blocks as the page may be mapped */
275 	iop = iomap_page_create(iter->inode, folio, iter->flags);
276 	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
277 	if (plen == 0)
278 		goto done;
279 
280 	if (iomap_block_needs_zeroing(iter, pos)) {
281 		folio_zero_range(folio, poff, plen);
282 		iomap_set_range_uptodate(folio, iop, poff, plen);
283 		goto done;
284 	}
285 
286 	ctx->cur_folio_in_bio = true;
287 	if (iop)
288 		atomic_add(plen, &iop->read_bytes_pending);
289 
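	/* Append to the current bio if physically contiguous, else start a new one. */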
290 	sector = iomap_sector(iomap, pos);
291 	if (!ctx->bio ||
292 	    bio_end_sector(ctx->bio) != sector ||
293 	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
294 		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
295 		gfp_t orig_gfp = gfp;
296 		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
297 
298 		if (ctx->bio)
299 			submit_bio(ctx->bio);
300 
301 		if (ctx->rac) /* same as readahead_gfp_mask */
302 			gfp |= __GFP_NORETRY | __GFP_NOWARN;
303 		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
304 				     REQ_OP_READ, gfp);
305 		/*
306 		 * If the bio_alloc fails, try it again for a single page to
307 		 * avoid having to deal with partial page reads.  This emulates
308 		 * what do_mpage_read_folio does.
309 		 */
310 		if (!ctx->bio) {
311 			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
312 					     orig_gfp);
313 		}
314 		if (ctx->rac)
315 			ctx->bio->bi_opf |= REQ_RAHEAD;
316 		ctx->bio->bi_iter.bi_sector = sector;
317 		ctx->bio->bi_end_io = iomap_read_end_io;
318 		bio_add_folio(ctx->bio, folio, plen, poff);
319 	}
320 
321 done:
322 	/*
323 	 * Move the caller beyond our range so that it keeps making progress.
324 	 * For that, we have to include any leading non-uptodate ranges, but
325 	 * we can skip trailing ones as they will be handled in the next
326 	 * iteration.
327 	 */
328 	return pos - orig_pos + plen;
329 }
330 
331 int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
332 {
333 	struct iomap_iter iter = {
334 		.inode		= folio->mapping->host,
335 		.pos		= folio_pos(folio),
336 		.len		= folio_size(folio),
337 	};
338 	struct iomap_readpage_ctx ctx = {
339 		.cur_folio	= folio,
340 	};
341 	int ret;
342 
343 	trace_iomap_readpage(iter.inode, 1);
344 
345 	while ((ret = iomap_iter(&iter, ops)) > 0)
346 		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);
347 
348 	if (ret < 0)
349 		folio_set_error(folio);
350 
351 	if (ctx.bio) {
352 		submit_bio(ctx.bio);
353 		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
354 	} else {
355 		WARN_ON_ONCE(ctx.cur_folio_in_bio);
356 		folio_unlock(folio);
357 	}
358 
359 	/*
360 	 * Just like mpage_readahead and block_read_full_folio, we always
361 	 * return 0 and just set the folio error flag on errors.  This
362 	 * should be cleaned up throughout the stack eventually.
363 	 */
364 	return 0;
365 }
366 EXPORT_SYMBOL_GPL(iomap_read_folio);
367 
368 static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
369 		struct iomap_readpage_ctx *ctx)
370 {
371 	loff_t length = iomap_length(iter);
372 	loff_t done, ret;
373 
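	/*
	 * Walk the readahead window one folio at a time, unlocking each folio
	 * that did not end up in a bio as soon as we move past it.
	 */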
374 	for (done = 0; done < length; done += ret) {
375 		if (ctx->cur_folio &&
376 		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
377 			if (!ctx->cur_folio_in_bio)
378 				folio_unlock(ctx->cur_folio);
379 			ctx->cur_folio = NULL;
380 		}
381 		if (!ctx->cur_folio) {
382 			ctx->cur_folio = readahead_folio(ctx->rac);
383 			ctx->cur_folio_in_bio = false;
384 		}
385 		ret = iomap_readpage_iter(iter, ctx, done);
386 		if (ret <= 0)
387 			return ret;
388 	}
389 
390 	return done;
391 }
392 
393 /**
394  * iomap_readahead - Attempt to read pages from a file.
395  * @rac: Describes the pages to be read.
396  * @ops: The operations vector for the filesystem.
397  *
398  * This function is for filesystems to call to implement their readahead
399  * address_space operation.
400  *
401  * Context: The @ops callbacks may submit I/O (eg to read the addresses of
402  * blocks from disc), and may wait for it.  The caller may be trying to
403  * access a different page, and so sleeping excessively should be avoided.
404  * It may allocate memory, but should avoid costly allocations.  This
405  * function is called with memalloc_nofs set, so allocations will not cause
406  * the filesystem to be reentered.
407  */
408 void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
409 {
410 	struct iomap_iter iter = {
411 		.inode	= rac->mapping->host,
412 		.pos	= readahead_pos(rac),
413 		.len	= readahead_length(rac),
414 	};
415 	struct iomap_readpage_ctx ctx = {
416 		.rac	= rac,
417 	};
418 
419 	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
420 
421 	while (iomap_iter(&iter, ops) > 0)
422 		iter.processed = iomap_readahead_iter(&iter, &ctx);
423 
424 	if (ctx.bio)
425 		submit_bio(ctx.bio);
426 	if (ctx.cur_folio) {
427 		if (!ctx.cur_folio_in_bio)
428 			folio_unlock(ctx.cur_folio);
429 	}
430 }
431 EXPORT_SYMBOL_GPL(iomap_readahead);
432 
433 /*
434  * iomap_is_partially_uptodate checks whether blocks within a folio are
435  * uptodate or not.
436  *
437  * Returns true if all blocks which correspond to the specified part
438  * of the folio are uptodate.
439  */
440 bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
441 {
442 	struct iomap_page *iop = to_iomap_page(folio);
443 	struct inode *inode = folio->mapping->host;
444 	unsigned first, last, i;
445 
446 	if (!iop)
447 		return false;
448 
449 	/* Caller's range may extend past the end of this folio */
450 	count = min(folio_size(folio) - from, count);
451 
452 	/* First and last blocks in range within folio */
453 	first = from >> inode->i_blkbits;
454 	last = (from + count - 1) >> inode->i_blkbits;
455 
456 	for (i = first; i <= last; i++)
457 		if (!test_bit(i, iop->uptodate))
458 			return false;
459 	return true;
460 }
461 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
462 
463 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
464 {
465 	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
466 			folio_size(folio));
467 
468 	/*
469 	 * mm accommodates an old ext3 case where clean folios might
470 	 * not have had the dirty bit cleared.  Thus, it can send actual
471 	 * dirty folios to ->release_folio() via shrink_active_list();
472 	 * skip those here.
473 	 */
474 	if (folio_test_dirty(folio) || folio_test_writeback(folio))
475 		return false;
476 	iomap_page_release(folio);
477 	return true;
478 }
479 EXPORT_SYMBOL_GPL(iomap_release_folio);
480 
481 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
482 {
483 	trace_iomap_invalidate_folio(folio->mapping->host,
484 					folio_pos(folio) + offset, len);
485 
486 	/*
487 	 * If we're invalidating the entire folio, clear the dirty state
488 	 * from it and release it to avoid unnecessary buildup of the LRU.
489 	 */
490 	if (offset == 0 && len == folio_size(folio)) {
491 		WARN_ON_ONCE(folio_test_writeback(folio));
492 		folio_cancel_dirty(folio);
493 		iomap_page_release(folio);
494 	} else if (folio_test_large(folio)) {
495 		/* Must release the iop so the page can be split */
496 		WARN_ON_ONCE(!folio_test_uptodate(folio) &&
497 			     folio_test_dirty(folio));
498 		iomap_page_release(folio);
499 	}
500 }
501 EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
502 
503 #ifdef CONFIG_MIGRATION
504 int
505 iomap_migrate_page(struct address_space *mapping, struct page *newpage,
506 		struct page *page, enum migrate_mode mode)
507 {
508 	struct folio *folio = page_folio(page);
509 	struct folio *newfolio = page_folio(newpage);
510 	int ret;
511 
512 	ret = folio_migrate_mapping(mapping, newfolio, folio, 0);
513 	if (ret != MIGRATEPAGE_SUCCESS)
514 		return ret;
515 
516 	if (folio_test_private(folio))
517 		folio_attach_private(newfolio, folio_detach_private(folio));
518 
519 	if (mode != MIGRATE_SYNC_NO_COPY)
520 		folio_migrate_copy(newfolio, folio);
521 	else
522 		folio_migrate_flags(newfolio, folio);
523 	return MIGRATEPAGE_SUCCESS;
524 }
525 EXPORT_SYMBOL_GPL(iomap_migrate_page);
526 #endif /* CONFIG_MIGRATION */
527 
528 static void
529 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
530 {
531 	loff_t i_size = i_size_read(inode);
532 
533 	/*
534 	 * Only truncate newly allocated pages beyond EOF, even if the
535 	 * write started inside the existing inode size.
536 	 */
537 	if (pos + len > i_size)
538 		truncate_pagecache_range(inode, max(pos, i_size),
539 					 pos + len - 1);
540 }
541 
542 static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
543 		size_t poff, size_t plen, const struct iomap *iomap)
544 {
545 	struct bio_vec bvec;
546 	struct bio bio;
547 
548 	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
549 	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
550 	bio_add_folio(&bio, folio, plen, poff);
551 	return submit_bio_wait(&bio);
552 }
553 
554 static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
555 		size_t len, struct folio *folio)
556 {
557 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
558 	struct iomap_page *iop;
559 	loff_t block_size = i_blocksize(iter->inode);
560 	loff_t block_start = round_down(pos, block_size);
561 	loff_t block_end = round_up(pos + len, block_size);
562 	unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
563 	size_t from = offset_in_folio(folio, pos), to = from + len;
564 	size_t poff, plen;
565 
566 	if (folio_test_uptodate(folio))
567 		return 0;
568 	folio_clear_error(folio);
569 
570 	iop = iomap_page_create(iter->inode, folio, iter->flags);
571 	if ((iter->flags & IOMAP_NOWAIT) && !iop && nr_blocks > 1)
572 		return -EAGAIN;
573 
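	/*
	 * Walk the blocks covered by the write.  Blocks that the write fully
	 * overwrites can stay non-uptodate (except when unsharing); any other
	 * non-uptodate block is zeroed (if newly allocated or beyond EOF) or
	 * read in synchronously so a short copy never exposes stale data.
	 */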
574 	do {
575 		iomap_adjust_read_range(iter->inode, folio, &block_start,
576 				block_end - block_start, &poff, &plen);
577 		if (plen == 0)
578 			break;
579 
580 		if (!(iter->flags & IOMAP_UNSHARE) &&
581 		    (from <= poff || from >= poff + plen) &&
582 		    (to <= poff || to >= poff + plen))
583 			continue;
584 
585 		if (iomap_block_needs_zeroing(iter, block_start)) {
586 			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
587 				return -EIO;
588 			folio_zero_segments(folio, poff, from, to, poff + plen);
589 		} else {
590 			int status;
591 
592 			if (iter->flags & IOMAP_NOWAIT)
593 				return -EAGAIN;
594 
595 			status = iomap_read_folio_sync(block_start, folio,
596 					poff, plen, srcmap);
597 			if (status)
598 				return status;
599 		}
600 		iomap_set_range_uptodate(folio, iop, poff, plen);
601 	} while ((block_start += plen) < block_end);
602 
603 	return 0;
604 }
605 
606 static int iomap_write_begin_inline(const struct iomap_iter *iter,
607 		struct folio *folio)
608 {
609 	/* needs more work for the tailpacking case; disable for now */
610 	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
611 		return -EIO;
612 	return iomap_read_inline_data(iter, folio);
613 }
614 
615 static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
616 		size_t len, struct folio **foliop)
617 {
618 	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
619 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
620 	struct folio *folio;
621 	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
622 	int status = 0;
623 
624 	if (iter->flags & IOMAP_NOWAIT)
625 		fgp |= FGP_NOWAIT;
626 
627 	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
628 	if (srcmap != &iter->iomap)
629 		BUG_ON(pos + len > srcmap->offset + srcmap->length);
630 
631 	if (fatal_signal_pending(current))
632 		return -EINTR;
633 
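	/*
	 * Without large folio support in the mapping, each pass through here
	 * can touch at most one page of the page cache.
	 */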
634 	if (!mapping_large_folio_support(iter->inode->i_mapping))
635 		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
636 
637 	if (page_ops && page_ops->page_prepare) {
638 		status = page_ops->page_prepare(iter->inode, pos, len);
639 		if (status)
640 			return status;
641 	}
642 
643 	folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
644 			fgp, mapping_gfp_mask(iter->inode->i_mapping));
645 	if (!folio) {
646 		status = (iter->flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOMEM;
647 		goto out_no_page;
648 	}
649 	if (pos + len > folio_pos(folio) + folio_size(folio))
650 		len = folio_pos(folio) + folio_size(folio) - pos;
651 
652 	if (srcmap->type == IOMAP_INLINE)
653 		status = iomap_write_begin_inline(iter, folio);
654 	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
655 		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
656 	else
657 		status = __iomap_write_begin(iter, pos, len, folio);
658 
659 	if (unlikely(status))
660 		goto out_unlock;
661 
662 	*foliop = folio;
663 	return 0;
664 
665 out_unlock:
666 	folio_unlock(folio);
667 	folio_put(folio);
668 	iomap_write_failed(iter->inode, pos, len);
669 
670 out_no_page:
671 	if (page_ops && page_ops->page_done)
672 		page_ops->page_done(iter->inode, pos, 0, NULL);
673 	return status;
674 }
675 
676 static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
677 		size_t copied, struct folio *folio)
678 {
679 	struct iomap_page *iop = to_iomap_page(folio);
680 	flush_dcache_folio(folio);
681 
682 	/*
683 	 * The blocks that were entirely written will now be uptodate, so we
684 	 * don't have to worry about a read_folio reading them and overwriting a
685 	 * partial write.  However, if we've encountered a short write and only
686 	 * partially written into a block, it will not be marked uptodate, so a
687 	 * read_folio might come in and destroy our partial write.
688 	 *
689 	 * Do the simplest thing and just treat any short write to a
690 	 * non-uptodate page as a zero-length write, and force the caller to
691 	 * redo the whole thing.
692 	 */
693 	if (unlikely(copied < len && !folio_test_uptodate(folio)))
694 		return 0;
695 	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
696 	filemap_dirty_folio(inode->i_mapping, folio);
697 	return copied;
698 }
699 
700 static size_t iomap_write_end_inline(const struct iomap_iter *iter,
701 		struct folio *folio, loff_t pos, size_t copied)
702 {
703 	const struct iomap *iomap = &iter->iomap;
704 	void *addr;
705 
706 	WARN_ON_ONCE(!folio_test_uptodate(folio));
707 	BUG_ON(!iomap_inline_data_valid(iomap));
708 
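	/*
	 * Inline extents live inside the on-disk inode, so copy the data back
	 * from the folio into the inline buffer and dirty the inode; no block
	 * I/O is issued here.
	 */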
709 	flush_dcache_folio(folio);
710 	addr = kmap_local_folio(folio, pos);
711 	memcpy(iomap_inline_data(iomap, pos), addr, copied);
712 	kunmap_local(addr);
713 
714 	mark_inode_dirty(iter->inode);
715 	return copied;
716 }
717 
718 /* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
719 static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
720 		size_t copied, struct folio *folio)
721 {
722 	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
723 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
724 	loff_t old_size = iter->inode->i_size;
725 	size_t ret;
726 
727 	if (srcmap->type == IOMAP_INLINE) {
728 		ret = iomap_write_end_inline(iter, folio, pos, copied);
729 	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
730 		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
731 				copied, &folio->page, NULL);
732 	} else {
733 		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
734 	}
735 
736 	/*
737 	 * Update the in-memory inode size after copying the data into the page
738 	 * cache.  It's up to the file system to write the updated size to disk,
739 	 * preferably after I/O completion so that no stale data is exposed.
740 	 */
741 	if (pos + ret > old_size) {
742 		i_size_write(iter->inode, pos + ret);
743 		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
744 	}
745 	folio_unlock(folio);
746 
747 	if (old_size < pos)
748 		pagecache_isize_extended(iter->inode, old_size, pos);
749 	if (page_ops && page_ops->page_done)
750 		page_ops->page_done(iter->inode, pos, ret, &folio->page);
751 	folio_put(folio);
752 
753 	if (ret < len)
754 		iomap_write_failed(iter->inode, pos + ret, len - ret);
755 	return ret;
756 }
757 
758 static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
759 {
760 	loff_t length = iomap_length(iter);
761 	loff_t pos = iter->pos;
762 	ssize_t written = 0;
763 	long status = 0;
764 	struct address_space *mapping = iter->inode->i_mapping;
765 	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
766 
767 	do {
768 		struct folio *folio;
769 		struct page *page;
770 		unsigned long offset;	/* Offset into pagecache page */
771 		unsigned long bytes;	/* Bytes to write to page */
772 		size_t copied;		/* Bytes copied from user */
773 
774 		offset = offset_in_page(pos);
775 		bytes = min_t(unsigned long, PAGE_SIZE - offset,
776 						iov_iter_count(i));
777 again:
778 		status = balance_dirty_pages_ratelimited_flags(mapping,
779 							       bdp_flags);
780 		if (unlikely(status))
781 			break;
782 
783 		if (bytes > length)
784 			bytes = length;
785 
786 		/*
787 		 * Bring in the user page that we'll copy from _first_.
788 		 * Otherwise there's a nasty deadlock on copying from the
789 		 * same page as we're writing to, without it being marked
790 		 * up-to-date.
791 		 *
792 		 * For async buffered writes the assumption is that the user
793 		 * page has already been faulted in. This can be optimized by
794 		 * faulting the user page.
795 		 */
796 		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
797 			status = -EFAULT;
798 			break;
799 		}
800 
801 		status = iomap_write_begin(iter, pos, bytes, &folio);
802 		if (unlikely(status))
803 			break;
804 
805 		page = folio_file_page(folio, pos >> PAGE_SHIFT);
806 		if (mapping_writably_mapped(mapping))
807 			flush_dcache_page(page);
808 
809 		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
810 
811 		status = iomap_write_end(iter, pos, bytes, copied, folio);
812 
813 		if (unlikely(copied != status))
814 			iov_iter_revert(i, copied - status);
815 
816 		cond_resched();
817 		if (unlikely(status == 0)) {
818 			/*
819 			 * A short copy made iomap_write_end() reject the
820 			 * thing entirely.  Might be memory poisoning
821 			 * halfway through, might be a race with munmap,
822 			 * might be severe memory pressure.
823 			 */
824 			if (copied)
825 				bytes = copied;
826 			goto again;
827 		}
828 		pos += status;
829 		written += status;
830 		length -= status;
831 	} while (iov_iter_count(i) && length);
832 
833 	return written ? written : status;
834 }
835 
836 ssize_t
837 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
838 		const struct iomap_ops *ops)
839 {
840 	struct iomap_iter iter = {
841 		.inode		= iocb->ki_filp->f_mapping->host,
842 		.pos		= iocb->ki_pos,
843 		.len		= iov_iter_count(i),
844 		.flags		= IOMAP_WRITE,
845 	};
846 	int ret;
847 
848 	if (iocb->ki_flags & IOCB_NOWAIT)
849 		iter.flags |= IOMAP_NOWAIT;
850 
851 	while ((ret = iomap_iter(&iter, ops)) > 0)
852 		iter.processed = iomap_write_iter(&iter, i);
853 	if (iter.pos == iocb->ki_pos)
854 		return ret;
855 	return iter.pos - iocb->ki_pos;
856 }
857 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
858 
859 static loff_t iomap_unshare_iter(struct iomap_iter *iter)
860 {
861 	struct iomap *iomap = &iter->iomap;
862 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
863 	loff_t pos = iter->pos;
864 	loff_t length = iomap_length(iter);
865 	long status = 0;
866 	loff_t written = 0;
867 
868 	/* don't bother with blocks that are not shared to start with */
869 	if (!(iomap->flags & IOMAP_F_SHARED))
870 		return length;
871 	/* don't bother with holes or unwritten extents */
872 	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
873 		return length;
874 
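	/*
	 * Pull each shared block into the page cache via write_begin and
	 * immediately dirty it again via write_end, so that writeback pushes
	 * the data out through the new, unshared mapping.
	 */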
875 	do {
876 		unsigned long offset = offset_in_page(pos);
877 		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
878 		struct folio *folio;
879 
880 		status = iomap_write_begin(iter, pos, bytes, &folio);
881 		if (unlikely(status))
882 			return status;
883 
884 		status = iomap_write_end(iter, pos, bytes, bytes, folio);
885 		if (WARN_ON_ONCE(status == 0))
886 			return -EIO;
887 
888 		cond_resched();
889 
890 		pos += status;
891 		written += status;
892 		length -= status;
893 
894 		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
895 	} while (length);
896 
897 	return written;
898 }
899 
900 int
901 iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
902 		const struct iomap_ops *ops)
903 {
904 	struct iomap_iter iter = {
905 		.inode		= inode,
906 		.pos		= pos,
907 		.len		= len,
908 		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
909 	};
910 	int ret;
911 
912 	while ((ret = iomap_iter(&iter, ops)) > 0)
913 		iter.processed = iomap_unshare_iter(&iter);
914 	return ret;
915 }
916 EXPORT_SYMBOL_GPL(iomap_file_unshare);
917 
918 static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
919 {
920 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
921 	loff_t pos = iter->pos;
922 	loff_t length = iomap_length(iter);
923 	loff_t written = 0;
924 
925 	/* already zeroed?  we're done. */
926 	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
927 		return length;
928 
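	/*
	 * Zero the range through the page cache, one folio at a time; the
	 * dirtied folios are written out later by normal writeback.
	 */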
929 	do {
930 		struct folio *folio;
931 		int status;
932 		size_t offset;
933 		size_t bytes = min_t(u64, SIZE_MAX, length);
934 
935 		status = iomap_write_begin(iter, pos, bytes, &folio);
936 		if (status)
937 			return status;
938 
939 		offset = offset_in_folio(folio, pos);
940 		if (bytes > folio_size(folio) - offset)
941 			bytes = folio_size(folio) - offset;
942 
943 		folio_zero_range(folio, offset, bytes);
944 		folio_mark_accessed(folio);
945 
946 		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
947 		if (WARN_ON_ONCE(bytes == 0))
948 			return -EIO;
949 
950 		pos += bytes;
951 		length -= bytes;
952 		written += bytes;
953 		if (did_zero)
954 			*did_zero = true;
955 	} while (length > 0);
956 
957 	return written;
958 }
959 
960 int
961 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
962 		const struct iomap_ops *ops)
963 {
964 	struct iomap_iter iter = {
965 		.inode		= inode,
966 		.pos		= pos,
967 		.len		= len,
968 		.flags		= IOMAP_ZERO,
969 	};
970 	int ret;
971 
972 	while ((ret = iomap_iter(&iter, ops)) > 0)
973 		iter.processed = iomap_zero_iter(&iter, did_zero);
974 	return ret;
975 }
976 EXPORT_SYMBOL_GPL(iomap_zero_range);
977 
978 int
979 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
980 		const struct iomap_ops *ops)
981 {
982 	unsigned int blocksize = i_blocksize(inode);
983 	unsigned int off = pos & (blocksize - 1);
984 
985 	/* Block boundary? Nothing to do */
986 	if (!off)
987 		return 0;
988 	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
989 }
990 EXPORT_SYMBOL_GPL(iomap_truncate_page);
991 
992 static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
993 		struct folio *folio)
994 {
995 	loff_t length = iomap_length(iter);
996 	int ret;
997 
998 	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
999 		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
1000 					      &iter->iomap);
1001 		if (ret)
1002 			return ret;
1003 		block_commit_write(&folio->page, 0, length);
1004 	} else {
1005 		WARN_ON_ONCE(!folio_test_uptodate(folio));
1006 		folio_mark_dirty(folio);
1007 	}
1008 
1009 	return length;
1010 }
1011 
1012 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
1013 {
1014 	struct iomap_iter iter = {
1015 		.inode		= file_inode(vmf->vma->vm_file),
1016 		.flags		= IOMAP_WRITE | IOMAP_FAULT,
1017 	};
1018 	struct folio *folio = page_folio(vmf->page);
1019 	ssize_t ret;
1020 
1021 	folio_lock(folio);
1022 	ret = folio_mkwrite_check_truncate(folio, iter.inode);
1023 	if (ret < 0)
1024 		goto out_unlock;
1025 	iter.pos = folio_pos(folio);
1026 	iter.len = ret;
1027 	while ((ret = iomap_iter(&iter, ops)) > 0)
1028 		iter.processed = iomap_folio_mkwrite_iter(&iter, folio);
1029 
1030 	if (ret < 0)
1031 		goto out_unlock;
1032 	folio_wait_stable(folio);
1033 	return VM_FAULT_LOCKED;
1034 out_unlock:
1035 	folio_unlock(folio);
1036 	return block_page_mkwrite_return(ret);
1037 }
1038 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
1039 
1040 static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
1041 		size_t len, int error)
1042 {
1043 	struct iomap_page *iop = to_iomap_page(folio);
1044 
1045 	if (error) {
1046 		folio_set_error(folio);
1047 		mapping_set_error(inode->i_mapping, error);
1048 	}
1049 
1050 	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
1051 	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);
1052 
1053 	if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
1054 		folio_end_writeback(folio);
1055 }
1056 
1057 /*
1058  * We're now finished for good with this ioend structure.  Update the page
1059  * state, release holds on bios, and finally free up memory.  Do not use the
1060  * ioend after this.
1061  */
1062 static u32
1063 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
1064 {
1065 	struct inode *inode = ioend->io_inode;
1066 	struct bio *bio = &ioend->io_inline_bio;
1067 	struct bio *last = ioend->io_bio, *next;
1068 	u64 start = bio->bi_iter.bi_sector;
1069 	loff_t offset = ioend->io_offset;
1070 	bool quiet = bio_flagged(bio, BIO_QUIET);
1071 	u32 folio_count = 0;
1072 
1073 	for (bio = &ioend->io_inline_bio; bio; bio = next) {
1074 		struct folio_iter fi;
1075 
1076 		/*
1077 		 * For the last bio, bi_private points to the ioend, so we
1078 		 * need to explicitly end the iteration here.
1079 		 */
1080 		if (bio == last)
1081 			next = NULL;
1082 		else
1083 			next = bio->bi_private;
1084 
1085 		/* walk all folios in bio, ending page IO on them */
1086 		bio_for_each_folio_all(fi, bio) {
1087 			iomap_finish_folio_write(inode, fi.folio, fi.length,
1088 					error);
1089 			folio_count++;
1090 		}
1091 		bio_put(bio);
1092 	}
1093 	/* The ioend has been freed by bio_put() */
1094 
1095 	if (unlikely(error && !quiet)) {
1096 		printk_ratelimited(KERN_ERR
1097 "%s: writeback error on inode %lu, offset %lld, sector %llu",
1098 			inode->i_sb->s_id, inode->i_ino, offset, start);
1099 	}
1100 	return folio_count;
1101 }
1102 
1103 /*
1104  * Ioend completion routine for merged bios. This can only be called from task
1105  * contexts as merged ioends can be of unbounded length. Hence we have to break up
1106  * the writeback completions into manageable chunks to avoid long scheduler
1107  * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
1108  * good batch processing throughput without creating adverse scheduler latency
1109  * conditions.
1110  */
1111 void
1112 iomap_finish_ioends(struct iomap_ioend *ioend, int error)
1113 {
1114 	struct list_head tmp;
1115 	u32 completions;
1116 
1117 	might_sleep();
1118 
1119 	list_replace_init(&ioend->io_list, &tmp);
1120 	completions = iomap_finish_ioend(ioend, error);
1121 
1122 	while (!list_empty(&tmp)) {
1123 		if (completions > IOEND_BATCH_SIZE * 8) {
1124 			cond_resched();
1125 			completions = 0;
1126 		}
1127 		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
1128 		list_del_init(&ioend->io_list);
1129 		completions += iomap_finish_ioend(ioend, error);
1130 	}
1131 }
1132 EXPORT_SYMBOL_GPL(iomap_finish_ioends);
1133 
1134 /*
1135  * We can merge two adjacent ioends if they have the same set of work to do.
1136  */
1137 static bool
1138 iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
1139 {
1140 	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
1141 		return false;
1142 	if ((ioend->io_flags & IOMAP_F_SHARED) ^
1143 	    (next->io_flags & IOMAP_F_SHARED))
1144 		return false;
1145 	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
1146 	    (next->io_type == IOMAP_UNWRITTEN))
1147 		return false;
1148 	if (ioend->io_offset + ioend->io_size != next->io_offset)
1149 		return false;
1150 	/*
1151 	 * Do not merge physically discontiguous ioends. The filesystem
1152 	 * completion functions will have to iterate the physical
1153 	 * discontiguities even if we merge the ioends at a logical level, so
1154 	 * we don't gain anything by merging physical discontiguities here.
1155 	 *
1156 	 * We cannot use bio->bi_iter.bi_sector here as it is modified during
1157 	 * submission so does not point to the start sector of the bio at
1158 	 * completion.
1159 	 */
1160 	if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
1161 		return false;
1162 	return true;
1163 }
1164 
1165 void
1166 iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
1167 {
1168 	struct iomap_ioend *next;
1169 
1170 	INIT_LIST_HEAD(&ioend->io_list);
1171 
1172 	while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
1173 			io_list))) {
1174 		if (!iomap_ioend_can_merge(ioend, next))
1175 			break;
1176 		list_move_tail(&next->io_list, &ioend->io_list);
1177 		ioend->io_size += next->io_size;
1178 	}
1179 }
1180 EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
1181 
1182 static int
1183 iomap_ioend_compare(void *priv, const struct list_head *a,
1184 		const struct list_head *b)
1185 {
1186 	struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
1187 	struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
1188 
1189 	if (ia->io_offset < ib->io_offset)
1190 		return -1;
1191 	if (ia->io_offset > ib->io_offset)
1192 		return 1;
1193 	return 0;
1194 }
1195 
1196 void
1197 iomap_sort_ioends(struct list_head *ioend_list)
1198 {
1199 	list_sort(NULL, ioend_list, iomap_ioend_compare);
1200 }
1201 EXPORT_SYMBOL_GPL(iomap_sort_ioends);
1202 
1203 static void iomap_writepage_end_bio(struct bio *bio)
1204 {
1205 	struct iomap_ioend *ioend = bio->bi_private;
1206 
1207 	iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
1208 }
1209 
1210 /*
1211  * Submit the final bio for an ioend.
1212  *
1213  * If @error is non-zero, it means that we have a situation where some part of
1214  * the submission process has failed after we've marked pages for writeback
1215  * and unlocked them.  In this situation, we need to fail the bio instead of
1216  * submitting it.  This typically only happens on a filesystem shutdown.
1217  */
1218 static int
1219 iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
1220 		int error)
1221 {
1222 	ioend->io_bio->bi_private = ioend;
1223 	ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
1224 
1225 	if (wpc->ops->prepare_ioend)
1226 		error = wpc->ops->prepare_ioend(ioend, error);
1227 	if (error) {
1228 		/*
1229 		 * If we're failing the IO now, just mark the ioend with an
1230 		 * error and finish it.  This will run IO completion immediately
1231 		 * as there is only one reference to the ioend at this point in
1232 		 * time.
1233 		 */
1234 		ioend->io_bio->bi_status = errno_to_blk_status(error);
1235 		bio_endio(ioend->io_bio);
1236 		return error;
1237 	}
1238 
1239 	submit_bio(ioend->io_bio);
1240 	return 0;
1241 }
1242 
1243 static struct iomap_ioend *
1244 iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
1245 		loff_t offset, sector_t sector, struct writeback_control *wbc)
1246 {
1247 	struct iomap_ioend *ioend;
1248 	struct bio *bio;
1249 
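	/*
	 * Bios allocated from iomap_ioend_bioset carry a struct iomap_ioend
	 * in their front padding (see iomap_init()), so a single allocation
	 * provides both the ioend and its first bio.
	 */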
1250 	bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
1251 			       REQ_OP_WRITE | wbc_to_write_flags(wbc),
1252 			       GFP_NOFS, &iomap_ioend_bioset);
1253 	bio->bi_iter.bi_sector = sector;
1254 	wbc_init_bio(wbc, bio);
1255 
1256 	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
1257 	INIT_LIST_HEAD(&ioend->io_list);
1258 	ioend->io_type = wpc->iomap.type;
1259 	ioend->io_flags = wpc->iomap.flags;
1260 	ioend->io_inode = inode;
1261 	ioend->io_size = 0;
1262 	ioend->io_folios = 0;
1263 	ioend->io_offset = offset;
1264 	ioend->io_bio = bio;
1265 	ioend->io_sector = sector;
1266 	return ioend;
1267 }
1268 
1269 /*
1270  * Allocate a new bio, and chain the old bio to the new one.
1271  *
1272  * Note that we have to perform the chaining in this unintuitive order
1273  * so that the bi_private linkage is set up in the right direction for the
1274  * traversal in iomap_finish_ioend().
1275  */
1276 static struct bio *
1277 iomap_chain_bio(struct bio *prev)
1278 {
1279 	struct bio *new;
1280 
1281 	new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
1282 	bio_clone_blkg_association(new, prev);
1283 	new->bi_iter.bi_sector = bio_end_sector(prev);
1284 
1285 	bio_chain(prev, new);
1286 	bio_get(prev);		/* for iomap_finish_ioend */
1287 	submit_bio(prev);
1288 	return new;
1289 }
1290 
1291 static bool
1292 iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
1293 		sector_t sector)
1294 {
1295 	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
1296 	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
1297 		return false;
1298 	if (wpc->iomap.type != wpc->ioend->io_type)
1299 		return false;
1300 	if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
1301 		return false;
1302 	if (sector != bio_end_sector(wpc->ioend->io_bio))
1303 		return false;
1304 	/*
1305 	 * Limit ioend bio chain lengths to minimise IO completion latency. This
1306 	 * also prevents long tight loops ending page writeback on all the
1307 	 * folios in the ioend.
1308 	 */
1309 	if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
1310 		return false;
1311 	return true;
1312 }
1313 
1314 /*
1315  * Test to see if we have an existing ioend structure that we could append to
1316  * first; otherwise finish off the current ioend and start another.
1317  */
1318 static void
1319 iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
1320 		struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
1321 		struct writeback_control *wbc, struct list_head *iolist)
1322 {
1323 	sector_t sector = iomap_sector(&wpc->iomap, pos);
1324 	unsigned len = i_blocksize(inode);
1325 	size_t poff = offset_in_folio(folio, pos);
1326 
1327 	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
1328 		if (wpc->ioend)
1329 			list_add(&wpc->ioend->io_list, iolist);
1330 		wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
1331 	}
1332 
1333 	if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
1334 		wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
1335 		bio_add_folio(wpc->ioend->io_bio, folio, len, poff);
1336 	}
1337 
1338 	if (iop)
1339 		atomic_add(len, &iop->write_bytes_pending);
1340 	wpc->ioend->io_size += len;
1341 	wbc_account_cgroup_owner(wbc, &folio->page, len);
1342 }
1343 
1344 /*
1345  * We implement an immediate ioend submission policy here to avoid needing to
1346  * chain multiple ioends and hence nest mempool allocations which can violate
1347  * the forward progress guarantees we need to provide. The current ioend we're
1348  * adding blocks to is cached in the writepage context, and if the new block
1349  * doesn't append to the cached ioend, it will create a new ioend and cache that
1350  * instead.
1351  *
1352  * If a new ioend is created and cached, the old ioend is returned and queued
1353  * locally for submission once the entire page is processed or an error has been
1354  * detected.  While ioends are submitted immediately after they are completed,
1355  * batching optimisations are provided by higher level block plugging.
1356  *
1357  * At the end of a writeback pass, there will be a cached ioend remaining on the
1358  * writepage context that the caller will need to submit.
1359  */
1360 static int
1361 iomap_writepage_map(struct iomap_writepage_ctx *wpc,
1362 		struct writeback_control *wbc, struct inode *inode,
1363 		struct folio *folio, u64 end_pos)
1364 {
1365 	struct iomap_page *iop = iomap_page_create(inode, folio, 0);
1366 	struct iomap_ioend *ioend, *next;
1367 	unsigned len = i_blocksize(inode);
1368 	unsigned nblocks = i_blocks_per_folio(inode, folio);
1369 	u64 pos = folio_pos(folio);
1370 	int error = 0, count = 0, i;
1371 	LIST_HEAD(submit_list);
1372 
1373 	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
1374 
1375 	/*
1376 	 * Walk through the folio to find areas to write back. If we
1377 	 * run off the end of the current map or find the current map
1378 	 * invalid, grab a new one.
1379 	 */
1380 	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
1381 		if (iop && !test_bit(i, iop->uptodate))
1382 			continue;
1383 
1384 		error = wpc->ops->map_blocks(wpc, inode, pos);
1385 		if (error)
1386 			break;
1387 		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
1388 			continue;
1389 		if (wpc->iomap.type == IOMAP_HOLE)
1390 			continue;
1391 		iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
1392 				 &submit_list);
1393 		count++;
1394 	}
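	/* Count this folio against the cached ioend's batch size limit. */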
1395 	if (count)
1396 		wpc->ioend->io_folios++;
1397 
1398 	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
1399 	WARN_ON_ONCE(!folio_test_locked(folio));
1400 	WARN_ON_ONCE(folio_test_writeback(folio));
1401 	WARN_ON_ONCE(folio_test_dirty(folio));
1402 
1403 	/*
1404 	 * We cannot cancel the ioend directly here on error.  We may have
1405 	 * already set other pages under writeback and hence we have to run I/O
1406 	 * completion to mark the error state of the pages under writeback
1407 	 * appropriately.
1408 	 */
1409 	if (unlikely(error)) {
1410 		/*
1411 		 * Let the filesystem know what portion of the current page
1412 		 * failed to map. If the page hasn't been added to ioend, it
1413 		 * won't be affected by I/O completion and we must unlock it
1414 		 * now.
1415 		 */
1416 		if (wpc->ops->discard_folio)
1417 			wpc->ops->discard_folio(folio, pos);
1418 		if (!count) {
1419 			folio_unlock(folio);
1420 			goto done;
1421 		}
1422 	}
1423 
1424 	folio_start_writeback(folio);
1425 	folio_unlock(folio);
1426 
1427 	/*
1428 	 * Preserve the original error if there was one; catch
1429 	 * submission errors here and propagate into subsequent ioend
1430 	 * submissions.
1431 	 */
1432 	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
1433 		int error2;
1434 
1435 		list_del_init(&ioend->io_list);
1436 		error2 = iomap_submit_ioend(wpc, ioend, error);
1437 		if (error2 && !error)
1438 			error = error2;
1439 	}
1440 
1441 	/*
1442 	 * We can end up here with no error and nothing to write only if we race
1443 	 * with a partial page truncate on a sub-page block sized filesystem.
1444 	 */
1445 	if (!count)
1446 		folio_end_writeback(folio);
1447 done:
1448 	mapping_set_error(folio->mapping, error);
1449 	return error;
1450 }
1451 
1452 /*
1453  * Write out a dirty page.
1454  *
1455  * For delalloc space on the page, we need to allocate space and flush it.
1456  * For unwritten space on the page, we need to start the conversion to
1457  * regular allocated space.
1458  */
1459 static int
1460 iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
1461 {
1462 	struct folio *folio = page_folio(page);
1463 	struct iomap_writepage_ctx *wpc = data;
1464 	struct inode *inode = folio->mapping->host;
1465 	u64 end_pos, isize;
1466 
1467 	trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
1468 
1469 	/*
1470 	 * Refuse to write the folio out if we're called from reclaim context.
1471 	 *
1472 	 * This avoids stack overflows when called from deeply used stacks in
1473 	 * random callers for direct reclaim or memcg reclaim.  We explicitly
1474 	 * allow reclaim from kswapd as the stack usage there is relatively low.
1475 	 *
1476 	 * This should never happen except in the case of a VM regression so
1477 	 * warn about it.
1478 	 */
1479 	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
1480 			PF_MEMALLOC))
1481 		goto redirty;
1482 
1483 	/*
1484 	 * Is this folio beyond the end of the file?
1485 	 *
1486 	 * The folio index is less than the end_index, adjust the end_pos
1487 	 * to the highest offset that this folio should represent.
1488 	 * -----------------------------------------------------
1489 	 * |			file mapping	       | <EOF> |
1490 	 * -----------------------------------------------------
1491 	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
1492 	 * ^--------------------------------^----------|--------
1493 	 * |     desired writeback range    |      see else    |
1494 	 * ---------------------------------^------------------|
1495 	 */
1496 	isize = i_size_read(inode);
1497 	end_pos = folio_pos(folio) + folio_size(folio);
1498 	if (end_pos > isize) {
1499 		/*
1500 		 * Check whether the page to write out is beyond or straddles
1501 		 * i_size or not.
1502 		 * -------------------------------------------------------
1503 		 * |		file mapping		        | <EOF>  |
1504 		 * -------------------------------------------------------
1505 		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
1506 		 * ^--------------------------------^-----------|---------
1507 		 * |				    |      Straddles     |
1508 		 * ---------------------------------^-----------|--------|
1509 		 */
1510 		size_t poff = offset_in_folio(folio, isize);
1511 		pgoff_t end_index = isize >> PAGE_SHIFT;
1512 
1513 		/*
1514 		 * Skip the page if it's fully outside i_size, e.g. due to a
1515 		 * truncate operation that's in progress. We must redirty the
1516 		 * page so that reclaim stops reclaiming it. Otherwise
1517 		 * iomap_release_folio() is called on it and gets confused.
1518 		 *
1519 		 * Note that the end_index is unsigned long.  If the given
1520 		 * offset is greater than 16TB on a 32-bit system then if we
1521 		 * checked if the page is fully outside i_size with
1522 		 * "if (page->index >= end_index + 1)", "end_index + 1" would
1523 		 * overflow and evaluate to 0.  Hence this page would be
1524 		 * redirtied and written out repeatedly, which would result in
1525 		 * an infinite loop; the user program performing this operation
1526 		 * would hang.  Instead, we can detect this situation by
1527 		 * checking if the page is totally beyond i_size or if its
1528 		 * offset is just equal to the EOF.
1529 		 */
1530 		if (folio->index > end_index ||
1531 		    (folio->index == end_index && poff == 0))
1532 			goto redirty;
1533 
1534 		/*
1535 		 * The page straddles i_size.  It must be zeroed out on each
1536 		 * and every writepage invocation because it may be mmapped.
1537 		 * "A file is mapped in multiples of the page size.  For a file
1538 		 * that is not a multiple of the page size, the remaining
1539 		 * memory is zeroed when mapped, and writes to that region are
1540 		 * not written out to the file."
1541 		 */
1542 		folio_zero_segment(folio, poff, folio_size(folio));
1543 		end_pos = isize;
1544 	}
1545 
1546 	return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
1547 
1548 redirty:
1549 	folio_redirty_for_writepage(wbc, folio);
1550 	folio_unlock(folio);
1551 	return 0;
1552 }
1553 
1554 int
1555 iomap_writepage(struct page *page, struct writeback_control *wbc,
1556 		struct iomap_writepage_ctx *wpc,
1557 		const struct iomap_writeback_ops *ops)
1558 {
1559 	int ret;
1560 
1561 	wpc->ops = ops;
1562 	ret = iomap_do_writepage(page, wbc, wpc);
1563 	if (!wpc->ioend)
1564 		return ret;
1565 	return iomap_submit_ioend(wpc, wpc->ioend, ret);
1566 }
1567 EXPORT_SYMBOL_GPL(iomap_writepage);
1568 
1569 int
1570 iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
1571 		struct iomap_writepage_ctx *wpc,
1572 		const struct iomap_writeback_ops *ops)
1573 {
1574 	int			ret;
1575 
1576 	wpc->ops = ops;
1577 	ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
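	/*
	 * The last ioend built during the walk is still cached in the
	 * writepage context; submit it now, or fail it if the walk itself
	 * returned an error.
	 */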
1578 	if (!wpc->ioend)
1579 		return ret;
1580 	return iomap_submit_ioend(wpc, wpc->ioend, ret);
1581 }
1582 EXPORT_SYMBOL_GPL(iomap_writepages);
1583 
1584 static int __init iomap_init(void)
1585 {
1586 	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
1587 			   offsetof(struct iomap_ioend, io_inline_bio),
1588 			   BIOSET_NEED_BVECS);
1589 }
1590 fs_initcall(iomap_init);
1591