// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

/*
 * Structure allocated for each folio when block size < folio size
 * to track sub-folio uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];
};

static inline struct iomap_page *to_iomap_page(struct folio *folio)
{
	if (folio_test_private(folio))
		return folio_get_private(folio);
	return NULL;
}

static struct bio_set iomap_ioend_bioset;

static struct iomap_page *
iomap_page_create(struct inode *inode, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (iop || nr_blocks <= 1)
		return iop;

	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
			GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&iop->uptodate_lock);
	if (folio_test_uptodate(folio))
		bitmap_fill(iop->uptodate, nr_blocks);
	folio_attach_private(folio, iop);
	return iop;
}

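/*
 * Illustrative sizing note (not relied on by the code here): with
 * 4096-byte blocks in a 64KiB folio, i_blocks_per_folio() yields 16
 * blocks, so uptodate[] needs BITS_TO_LONGS(16) longs -- a single
 * unsigned long on a 64-bit build.  Folios covering a single block skip
 * the allocation entirely, since iomap_page_create() returns NULL for
 * them and the folio uptodate flag is enough.
 */
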
static void iomap_page_release(struct folio *folio)
{
	struct iomap_page *iop = folio_detach_private(folio);
	struct inode *inode = folio->mapping->host;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			folio_test_uptodate(folio));
	kfree(iop);
}

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_page *iop = to_iomap_page(folio);
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

static void iomap_iop_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned long flags;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, last - first + 1);
	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void iomap_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	if (folio_test_error(folio))
		return;

	if (iop)
		iomap_iop_set_range_uptodate(folio, iop, off, len);
	else
		folio_mark_uptodate(folio);
}

static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (unlikely(error)) {
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		iomap_set_range_uptodate(folio, iop, offset, len);
	}

	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
		folio_unlock(folio);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	struct iomap_page *iop;
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	size_t offset = offset_in_folio(folio, iomap->offset);
	void *addr;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		iop = iomap_page_create(iter->inode, folio);
	else
		iop = to_iomap_page(folio);

	addr = kmap_local_folio(folio, offset);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
	return 0;
}

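/*
 * Illustrative sketch (not part of this file): for an inline extent the
 * filesystem's ->iomap_begin is expected to describe the in-core copy of
 * the data roughly as below; "inline_buf" and "inline_size" are
 * hypothetical names for the filesystem's own state, and iomap->offset is
 * typically 0 for a data-at-the-start-of-file layout:
 *
 *	iomap->type = IOMAP_INLINE;
 *	iomap->offset = 0;
 *	iomap->length = inline_size;
 *	iomap->inline_data = inline_buf;
 *
 * iomap_read_inline_data() then copies those bytes into the folio and
 * zeroes the remainder of the page.
 */
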
static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct folio *folio = ctx->cur_folio;
	struct iomap_page *iop;
	loff_t orig_pos = pos;
	size_t poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, folio);

	/* zero post-eof blocks as the page may be mapped */
	iop = iomap_page_create(iter->inode, folio);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, iop, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (iop)
		atomic_add(plen, &iop->read_bytes_pending);

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_readpage does.
		 */
		if (!ctx->bio)
			ctx->bio = bio_alloc(orig_gfp, 1);
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct folio *folio = page_folio(page);
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		folio_set_error(folio);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_page, we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);

static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct folio *folio = page_folio(page);

	trace_iomap_releasepage(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return 0;
	iomap_page_release(folio);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

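/*
 * Illustrative sketch (not part of this file): a filesystem built on these
 * helpers typically points its address_space_operations straight at them.
 * "myfs_iomap_ops" and the myfs_* wrappers are hypothetical names;
 * iomap_invalidatepage() and iomap_migrate_page() are defined further down
 * in this file:
 *
 *	static int myfs_readpage(struct file *unused, struct page *page)
 *	{
 *		return iomap_readpage(page, &myfs_iomap_ops);
 *	}
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		iomap_readahead(rac, &myfs_iomap_ops);
 *	}
 *
 *	const struct address_space_operations myfs_aops = {
 *		.readpage		= myfs_readpage,
 *		.readahead		= myfs_readahead,
 *		.is_partially_uptodate	= iomap_is_partially_uptodate,
 *		.releasepage		= iomap_releasepage,
 *		.invalidatepage		= iomap_invalidatepage,
 *		.migratepage		= iomap_migrate_page,
 *	};
 */
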
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidatepage(folio->mapping->host, offset, len);

	/*
	 * If we're invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		iomap_page_release(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

void iomap_invalidatepage(struct page *page, unsigned int offset,
		unsigned int len)
{
	iomap_invalidate_folio(page_folio(page), offset, len);
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page))
		attach_page_private(newpage, detach_page_private(page));

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	bio_add_folio(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}

static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_page *iop = iomap_page_create(iter->inode, folio);
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	if (folio_test_uptodate(folio))
		return 0;
	folio_clear_error(folio);

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, iop, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(iter->inode, pos, len);
		if (status)
			return status;
	}

	folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
	if (!folio) {
		status = -ENOMEM;
		goto out_no_page;
	}
	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	folio_unlock(folio);
	folio_put(folio);
	iomap_write_failed(iter->inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, 0, NULL);
	return status;
}

static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return 0;
	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
	filemap_dirty_folio(inode->i_mapping, folio);
	return copied;
}

static size_t iomap_write_end_inline(const struct iomap_iter *iter,
		struct page *page, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_page(page);
	addr = kmap_local_page(page) + pos;
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, &folio->page, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, &folio->page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	folio_unlock(folio);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, ret, &folio->page);
	folio_put(folio);

	if (ret < len)
		iomap_write_failed(iter->inode, pos, len);
	return ret;
}

static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;

	do {
		struct folio *folio;
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			break;

		page = folio_file_page(folio, pos >> PAGE_SHIFT);
		if (mapping_writably_mapped(iter->inode->i_mapping))
			flush_dcache_page(page);

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);

		status = iomap_write_end(iter, pos, bytes, copied, folio);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			goto again;
		}
		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);
	if (iter.pos == iocb->ki_pos)
		return ret;
	return iter.pos - iocb->ki_pos;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

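/*
 * Illustrative sketch (not part of this file): a minimal ->write_iter
 * implementation on top of iomap_file_buffered_write(), assuming a
 * hypothetical "myfs_iomap_ops".  Real filesystems add their own locking
 * and direct I/O handling around this:
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */
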
static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	long status = 0;
	loff_t written = 0;

	/* don't bother with blocks that are not shared to start with */
	if (!(iomap->flags & IOMAP_F_SHARED))
		return length;
	/* don't bother with holes or unwritten extents */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned long offset = offset_in_page(pos);
		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
		struct folio *folio;

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			return status;

		status = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(status == 0))
			return -EIO;

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);

static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
{
	struct folio *folio;
	int status;
	size_t offset;
	size_t bytes = min_t(u64, SIZE_MAX, length);

	status = iomap_write_begin(iter, pos, bytes, &folio);
	if (status)
		return status;

	offset = offset_in_folio(folio, pos);
	if (bytes > folio_size(folio) - offset)
		bytes = folio_size(folio) - offset;

	folio_zero_range(folio, offset, bytes);
	folio_mark_accessed(folio);

	return iomap_write_end(iter, pos, bytes, bytes, folio);
}

static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		s64 bytes;

		if (IS_DAX(iter->inode))
			bytes = dax_iomap_zero(pos, length, iomap);
		else
			bytes = __iomap_zero_iter(iter, pos, length);
		if (bytes < 0)
			return bytes;

		pos += bytes;
		length -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (length > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

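/*
 * Illustrative sketch (not part of this file): a filesystem shrinking an
 * inode down to "newsize" usually zeroes the now-partial tail block before
 * updating i_size, roughly as below ("myfs_iomap_ops", "newsize" and
 * "did_zero" are hypothetical locals):
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&myfs_iomap_ops);
 *
 * iomap_zero_range() covers the more general case, e.g. zeroing an
 * arbitrary byte range for a zero-range fallocate operation.
 */
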
static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
		struct folio *folio)
{
	loff_t length = iomap_length(iter);
	int ret;

	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
					      &iter->iomap);
		if (ret)
			return ret;
		block_commit_write(&folio->page, 0, length);
	} else {
		WARN_ON_ONCE(!folio_test_uptodate(folio));
		folio_mark_dirty(folio);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= file_inode(vmf->vma->vm_file),
		.flags		= IOMAP_WRITE | IOMAP_FAULT,
	};
	struct folio *folio = page_folio(vmf->page);
	ssize_t ret;

	folio_lock(folio);
	ret = folio_mkwrite_check_truncate(folio, iter.inode);
	if (ret < 0)
		goto out_unlock;
	iter.pos = folio_pos(folio);
	iter.len = ret;
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_folio_mkwrite_iter(&iter, folio);

	if (ret < 0)
		goto out_unlock;
	folio_wait_stable(folio);
	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

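/*
 * Illustrative sketch (not part of this file): iomap_page_mkwrite() is
 * normally reached through a filesystem's vm_operations_struct, e.g. as
 * below ("myfs_iomap_ops" is again hypothetical and any filesystem
 * locking is omitted):
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *	}
 *
 *	static const struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */
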
Do not use the 1044598ecfbaSChristoph Hellwig * ioend after this. 1045598ecfbaSChristoph Hellwig */ 1046598ecfbaSChristoph Hellwig static void 1047598ecfbaSChristoph Hellwig iomap_finish_ioend(struct iomap_ioend *ioend, int error) 1048598ecfbaSChristoph Hellwig { 1049598ecfbaSChristoph Hellwig struct inode *inode = ioend->io_inode; 1050598ecfbaSChristoph Hellwig struct bio *bio = &ioend->io_inline_bio; 1051598ecfbaSChristoph Hellwig struct bio *last = ioend->io_bio, *next; 1052598ecfbaSChristoph Hellwig u64 start = bio->bi_iter.bi_sector; 1053c275779fSZorro Lang loff_t offset = ioend->io_offset; 1054598ecfbaSChristoph Hellwig bool quiet = bio_flagged(bio, BIO_QUIET); 1055598ecfbaSChristoph Hellwig 1056598ecfbaSChristoph Hellwig for (bio = &ioend->io_inline_bio; bio; bio = next) { 10578ffd74e9SMatthew Wilcox (Oracle) struct folio_iter fi; 1058598ecfbaSChristoph Hellwig 1059598ecfbaSChristoph Hellwig /* 1060598ecfbaSChristoph Hellwig * For the last bio, bi_private points to the ioend, so we 1061598ecfbaSChristoph Hellwig * need to explicitly end the iteration here. 1062598ecfbaSChristoph Hellwig */ 1063598ecfbaSChristoph Hellwig if (bio == last) 1064598ecfbaSChristoph Hellwig next = NULL; 1065598ecfbaSChristoph Hellwig else 1066598ecfbaSChristoph Hellwig next = bio->bi_private; 1067598ecfbaSChristoph Hellwig 10688ffd74e9SMatthew Wilcox (Oracle) /* walk all folios in bio, ending page IO on them */ 10698ffd74e9SMatthew Wilcox (Oracle) bio_for_each_folio_all(fi, bio) 10708ffd74e9SMatthew Wilcox (Oracle) iomap_finish_folio_write(inode, fi.folio, fi.length, 10718ffd74e9SMatthew Wilcox (Oracle) error); 1072598ecfbaSChristoph Hellwig bio_put(bio); 1073598ecfbaSChristoph Hellwig } 1074c275779fSZorro Lang /* The ioend has been freed by bio_put() */ 1075598ecfbaSChristoph Hellwig 1076598ecfbaSChristoph Hellwig if (unlikely(error && !quiet)) { 1077598ecfbaSChristoph Hellwig printk_ratelimited(KERN_ERR 10789cd0ed63SDarrick J. Wong "%s: writeback error on inode %lu, offset %lld, sector %llu", 1079c275779fSZorro Lang inode->i_sb->s_id, inode->i_ino, offset, start); 1080598ecfbaSChristoph Hellwig } 1081598ecfbaSChristoph Hellwig } 1082598ecfbaSChristoph Hellwig 1083598ecfbaSChristoph Hellwig void 1084598ecfbaSChristoph Hellwig iomap_finish_ioends(struct iomap_ioend *ioend, int error) 1085598ecfbaSChristoph Hellwig { 1086598ecfbaSChristoph Hellwig struct list_head tmp; 1087598ecfbaSChristoph Hellwig 1088598ecfbaSChristoph Hellwig list_replace_init(&ioend->io_list, &tmp); 1089598ecfbaSChristoph Hellwig iomap_finish_ioend(ioend, error); 1090598ecfbaSChristoph Hellwig 1091598ecfbaSChristoph Hellwig while (!list_empty(&tmp)) { 1092598ecfbaSChristoph Hellwig ioend = list_first_entry(&tmp, struct iomap_ioend, io_list); 1093598ecfbaSChristoph Hellwig list_del_init(&ioend->io_list); 1094598ecfbaSChristoph Hellwig iomap_finish_ioend(ioend, error); 1095598ecfbaSChristoph Hellwig } 1096598ecfbaSChristoph Hellwig } 1097598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_finish_ioends); 1098598ecfbaSChristoph Hellwig 1099598ecfbaSChristoph Hellwig /* 1100598ecfbaSChristoph Hellwig * We can merge two adjacent ioends if they have the same set of work to do. 
1101598ecfbaSChristoph Hellwig */ 1102598ecfbaSChristoph Hellwig static bool 1103598ecfbaSChristoph Hellwig iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next) 1104598ecfbaSChristoph Hellwig { 1105598ecfbaSChristoph Hellwig if (ioend->io_bio->bi_status != next->io_bio->bi_status) 1106598ecfbaSChristoph Hellwig return false; 1107598ecfbaSChristoph Hellwig if ((ioend->io_flags & IOMAP_F_SHARED) ^ 1108598ecfbaSChristoph Hellwig (next->io_flags & IOMAP_F_SHARED)) 1109598ecfbaSChristoph Hellwig return false; 1110598ecfbaSChristoph Hellwig if ((ioend->io_type == IOMAP_UNWRITTEN) ^ 1111598ecfbaSChristoph Hellwig (next->io_type == IOMAP_UNWRITTEN)) 1112598ecfbaSChristoph Hellwig return false; 1113598ecfbaSChristoph Hellwig if (ioend->io_offset + ioend->io_size != next->io_offset) 1114598ecfbaSChristoph Hellwig return false; 1115598ecfbaSChristoph Hellwig return true; 1116598ecfbaSChristoph Hellwig } 1117598ecfbaSChristoph Hellwig 1118598ecfbaSChristoph Hellwig void 11196e552494SBrian Foster iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends) 1120598ecfbaSChristoph Hellwig { 1121598ecfbaSChristoph Hellwig struct iomap_ioend *next; 1122598ecfbaSChristoph Hellwig 1123598ecfbaSChristoph Hellwig INIT_LIST_HEAD(&ioend->io_list); 1124598ecfbaSChristoph Hellwig 1125598ecfbaSChristoph Hellwig while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend, 1126598ecfbaSChristoph Hellwig io_list))) { 1127598ecfbaSChristoph Hellwig if (!iomap_ioend_can_merge(ioend, next)) 1128598ecfbaSChristoph Hellwig break; 1129598ecfbaSChristoph Hellwig list_move_tail(&next->io_list, &ioend->io_list); 1130598ecfbaSChristoph Hellwig ioend->io_size += next->io_size; 1131598ecfbaSChristoph Hellwig } 1132598ecfbaSChristoph Hellwig } 1133598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_ioend_try_merge); 1134598ecfbaSChristoph Hellwig 1135598ecfbaSChristoph Hellwig static int 11364f0f586bSSami Tolvanen iomap_ioend_compare(void *priv, const struct list_head *a, 11374f0f586bSSami Tolvanen const struct list_head *b) 1138598ecfbaSChristoph Hellwig { 1139b3d423ecSChristoph Hellwig struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list); 1140b3d423ecSChristoph Hellwig struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list); 1141598ecfbaSChristoph Hellwig 1142598ecfbaSChristoph Hellwig if (ia->io_offset < ib->io_offset) 1143598ecfbaSChristoph Hellwig return -1; 1144b3d423ecSChristoph Hellwig if (ia->io_offset > ib->io_offset) 1145598ecfbaSChristoph Hellwig return 1; 1146598ecfbaSChristoph Hellwig return 0; 1147598ecfbaSChristoph Hellwig } 1148598ecfbaSChristoph Hellwig 1149598ecfbaSChristoph Hellwig void 1150598ecfbaSChristoph Hellwig iomap_sort_ioends(struct list_head *ioend_list) 1151598ecfbaSChristoph Hellwig { 1152598ecfbaSChristoph Hellwig list_sort(NULL, ioend_list, iomap_ioend_compare); 1153598ecfbaSChristoph Hellwig } 1154598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_sort_ioends); 1155598ecfbaSChristoph Hellwig 1156598ecfbaSChristoph Hellwig static void iomap_writepage_end_bio(struct bio *bio) 1157598ecfbaSChristoph Hellwig { 1158598ecfbaSChristoph Hellwig struct iomap_ioend *ioend = bio->bi_private; 1159598ecfbaSChristoph Hellwig 1160598ecfbaSChristoph Hellwig iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status)); 1161598ecfbaSChristoph Hellwig } 1162598ecfbaSChristoph Hellwig 1163598ecfbaSChristoph Hellwig /* 1164598ecfbaSChristoph Hellwig * Submit the final bio for an ioend. 
1165598ecfbaSChristoph Hellwig * 1166598ecfbaSChristoph Hellwig * If @error is non-zero, it means that we have a situation where some part of 1167f1f264b4SAndreas Gruenbacher * the submission process has failed after we've marked pages for writeback 1168598ecfbaSChristoph Hellwig * and unlocked them. In this situation, we need to fail the bio instead of 1169598ecfbaSChristoph Hellwig * submitting it. This typically only happens on a filesystem shutdown. 1170598ecfbaSChristoph Hellwig */ 1171598ecfbaSChristoph Hellwig static int 1172598ecfbaSChristoph Hellwig iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend, 1173598ecfbaSChristoph Hellwig int error) 1174598ecfbaSChristoph Hellwig { 1175598ecfbaSChristoph Hellwig ioend->io_bio->bi_private = ioend; 1176598ecfbaSChristoph Hellwig ioend->io_bio->bi_end_io = iomap_writepage_end_bio; 1177598ecfbaSChristoph Hellwig 1178598ecfbaSChristoph Hellwig if (wpc->ops->prepare_ioend) 1179598ecfbaSChristoph Hellwig error = wpc->ops->prepare_ioend(ioend, error); 1180598ecfbaSChristoph Hellwig if (error) { 1181598ecfbaSChristoph Hellwig /* 1182f1f264b4SAndreas Gruenbacher * If we're failing the IO now, just mark the ioend with an 1183598ecfbaSChristoph Hellwig * error and finish it. This will run IO completion immediately 1184598ecfbaSChristoph Hellwig * as there is only one reference to the ioend at this point in 1185598ecfbaSChristoph Hellwig * time. 1186598ecfbaSChristoph Hellwig */ 1187598ecfbaSChristoph Hellwig ioend->io_bio->bi_status = errno_to_blk_status(error); 1188598ecfbaSChristoph Hellwig bio_endio(ioend->io_bio); 1189598ecfbaSChristoph Hellwig return error; 1190598ecfbaSChristoph Hellwig } 1191598ecfbaSChristoph Hellwig 1192598ecfbaSChristoph Hellwig submit_bio(ioend->io_bio); 1193598ecfbaSChristoph Hellwig return 0; 1194598ecfbaSChristoph Hellwig } 1195598ecfbaSChristoph Hellwig 1196598ecfbaSChristoph Hellwig static struct iomap_ioend * 1197598ecfbaSChristoph Hellwig iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, 1198598ecfbaSChristoph Hellwig loff_t offset, sector_t sector, struct writeback_control *wbc) 1199598ecfbaSChristoph Hellwig { 1200598ecfbaSChristoph Hellwig struct iomap_ioend *ioend; 1201598ecfbaSChristoph Hellwig struct bio *bio; 1202598ecfbaSChristoph Hellwig 1203a8affc03SChristoph Hellwig bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset); 1204598ecfbaSChristoph Hellwig bio_set_dev(bio, wpc->iomap.bdev); 1205598ecfbaSChristoph Hellwig bio->bi_iter.bi_sector = sector; 1206598ecfbaSChristoph Hellwig bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc); 1207598ecfbaSChristoph Hellwig bio->bi_write_hint = inode->i_write_hint; 1208598ecfbaSChristoph Hellwig wbc_init_bio(wbc, bio); 1209598ecfbaSChristoph Hellwig 1210598ecfbaSChristoph Hellwig ioend = container_of(bio, struct iomap_ioend, io_inline_bio); 1211598ecfbaSChristoph Hellwig INIT_LIST_HEAD(&ioend->io_list); 1212598ecfbaSChristoph Hellwig ioend->io_type = wpc->iomap.type; 1213598ecfbaSChristoph Hellwig ioend->io_flags = wpc->iomap.flags; 1214598ecfbaSChristoph Hellwig ioend->io_inode = inode; 1215598ecfbaSChristoph Hellwig ioend->io_size = 0; 1216598ecfbaSChristoph Hellwig ioend->io_offset = offset; 1217598ecfbaSChristoph Hellwig ioend->io_bio = bio; 1218598ecfbaSChristoph Hellwig return ioend; 1219598ecfbaSChristoph Hellwig } 1220598ecfbaSChristoph Hellwig 1221598ecfbaSChristoph Hellwig /* 1222598ecfbaSChristoph Hellwig * Allocate a new bio, and chain the old bio to the new one. 
1223598ecfbaSChristoph Hellwig * 1224f1f264b4SAndreas Gruenbacher * Note that we have to perform the chaining in this unintuitive order 1225598ecfbaSChristoph Hellwig * so that the bi_private linkage is set up in the right direction for the 1226598ecfbaSChristoph Hellwig * traversal in iomap_finish_ioend(). 1227598ecfbaSChristoph Hellwig */ 1228598ecfbaSChristoph Hellwig static struct bio * 1229598ecfbaSChristoph Hellwig iomap_chain_bio(struct bio *prev) 1230598ecfbaSChristoph Hellwig { 1231598ecfbaSChristoph Hellwig struct bio *new; 1232598ecfbaSChristoph Hellwig 1233a8affc03SChristoph Hellwig new = bio_alloc(GFP_NOFS, BIO_MAX_VECS); 1234598ecfbaSChristoph Hellwig bio_copy_dev(new, prev);/* also copies over blkcg information */ 1235598ecfbaSChristoph Hellwig new->bi_iter.bi_sector = bio_end_sector(prev); 1236598ecfbaSChristoph Hellwig new->bi_opf = prev->bi_opf; 1237598ecfbaSChristoph Hellwig new->bi_write_hint = prev->bi_write_hint; 1238598ecfbaSChristoph Hellwig 1239598ecfbaSChristoph Hellwig bio_chain(prev, new); 1240598ecfbaSChristoph Hellwig bio_get(prev); /* for iomap_finish_ioend */ 1241598ecfbaSChristoph Hellwig submit_bio(prev); 1242598ecfbaSChristoph Hellwig return new; 1243598ecfbaSChristoph Hellwig } 1244598ecfbaSChristoph Hellwig 1245598ecfbaSChristoph Hellwig static bool 1246598ecfbaSChristoph Hellwig iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset, 1247598ecfbaSChristoph Hellwig sector_t sector) 1248598ecfbaSChristoph Hellwig { 1249598ecfbaSChristoph Hellwig if ((wpc->iomap.flags & IOMAP_F_SHARED) != 1250598ecfbaSChristoph Hellwig (wpc->ioend->io_flags & IOMAP_F_SHARED)) 1251598ecfbaSChristoph Hellwig return false; 1252598ecfbaSChristoph Hellwig if (wpc->iomap.type != wpc->ioend->io_type) 1253598ecfbaSChristoph Hellwig return false; 1254598ecfbaSChristoph Hellwig if (offset != wpc->ioend->io_offset + wpc->ioend->io_size) 1255598ecfbaSChristoph Hellwig return false; 1256598ecfbaSChristoph Hellwig if (sector != bio_end_sector(wpc->ioend->io_bio)) 1257598ecfbaSChristoph Hellwig return false; 1258598ecfbaSChristoph Hellwig return true; 1259598ecfbaSChristoph Hellwig } 1260598ecfbaSChristoph Hellwig 1261598ecfbaSChristoph Hellwig /* 1262598ecfbaSChristoph Hellwig * Test to see if we have an existing ioend structure that we could append to 1263f1f264b4SAndreas Gruenbacher * first; otherwise finish off the current ioend and start another. 
1264598ecfbaSChristoph Hellwig */ 1265598ecfbaSChristoph Hellwig static void 1266598ecfbaSChristoph Hellwig iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page, 1267598ecfbaSChristoph Hellwig struct iomap_page *iop, struct iomap_writepage_ctx *wpc, 1268598ecfbaSChristoph Hellwig struct writeback_control *wbc, struct list_head *iolist) 1269598ecfbaSChristoph Hellwig { 1270598ecfbaSChristoph Hellwig sector_t sector = iomap_sector(&wpc->iomap, offset); 1271598ecfbaSChristoph Hellwig unsigned len = i_blocksize(inode); 1272598ecfbaSChristoph Hellwig unsigned poff = offset & (PAGE_SIZE - 1); 1273598ecfbaSChristoph Hellwig 1274598ecfbaSChristoph Hellwig if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) { 1275598ecfbaSChristoph Hellwig if (wpc->ioend) 1276598ecfbaSChristoph Hellwig list_add(&wpc->ioend->io_list, iolist); 1277598ecfbaSChristoph Hellwig wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc); 1278598ecfbaSChristoph Hellwig } 1279598ecfbaSChristoph Hellwig 1280c1b79f11SChristoph Hellwig if (bio_add_page(wpc->ioend->io_bio, page, len, poff) != len) { 1281c1b79f11SChristoph Hellwig wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio); 1282c1b79f11SChristoph Hellwig __bio_add_page(wpc->ioend->io_bio, page, len, poff); 1283c1b79f11SChristoph Hellwig } 1284c1b79f11SChristoph Hellwig 12850fb2d720SMatthew Wilcox (Oracle) if (iop) 12860fb2d720SMatthew Wilcox (Oracle) atomic_add(len, &iop->write_bytes_pending); 1287598ecfbaSChristoph Hellwig wpc->ioend->io_size += len; 1288598ecfbaSChristoph Hellwig wbc_account_cgroup_owner(wbc, page, len); 1289598ecfbaSChristoph Hellwig } 1290598ecfbaSChristoph Hellwig 1291598ecfbaSChristoph Hellwig /* 1292598ecfbaSChristoph Hellwig * We implement an immediate ioend submission policy here to avoid needing to 1293598ecfbaSChristoph Hellwig * chain multiple ioends and hence nest mempool allocations which can violate 1294f1f264b4SAndreas Gruenbacher * the forward progress guarantees we need to provide. The current ioend we're 1295f1f264b4SAndreas Gruenbacher * adding blocks to is cached in the writepage context, and if the new block 1296f1f264b4SAndreas Gruenbacher * doesn't append to the cached ioend, it will create a new ioend and cache that 1297598ecfbaSChristoph Hellwig * instead. 1298598ecfbaSChristoph Hellwig * 1299598ecfbaSChristoph Hellwig * If a new ioend is created and cached, the old ioend is returned and queued 1300598ecfbaSChristoph Hellwig * locally for submission once the entire page is processed or an error has been 1301598ecfbaSChristoph Hellwig * detected. While ioends are submitted immediately after they are completed, 1302598ecfbaSChristoph Hellwig * batching optimisations are provided by higher level block plugging. 1303598ecfbaSChristoph Hellwig * 1304598ecfbaSChristoph Hellwig * At the end of a writeback pass, there will be a cached ioend remaining on the 1305598ecfbaSChristoph Hellwig * writepage context that the caller will need to submit. 
1306598ecfbaSChristoph Hellwig */ 1307598ecfbaSChristoph Hellwig static int 1308598ecfbaSChristoph Hellwig iomap_writepage_map(struct iomap_writepage_ctx *wpc, 1309598ecfbaSChristoph Hellwig struct writeback_control *wbc, struct inode *inode, 1310598ecfbaSChristoph Hellwig struct page *page, u64 end_offset) 1311598ecfbaSChristoph Hellwig { 1312435d44b3SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 1313435d44b3SMatthew Wilcox (Oracle) struct iomap_page *iop = iomap_page_create(inode, folio); 1314598ecfbaSChristoph Hellwig struct iomap_ioend *ioend, *next; 1315598ecfbaSChristoph Hellwig unsigned len = i_blocksize(inode); 1316598ecfbaSChristoph Hellwig u64 file_offset; /* file offset of page */ 1317598ecfbaSChristoph Hellwig int error = 0, count = 0, i; 1318598ecfbaSChristoph Hellwig LIST_HEAD(submit_list); 1319598ecfbaSChristoph Hellwig 13200fb2d720SMatthew Wilcox (Oracle) WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0); 1321598ecfbaSChristoph Hellwig 1322598ecfbaSChristoph Hellwig /* 1323598ecfbaSChristoph Hellwig * Walk through the page to find areas to write back. If we run off the 1324598ecfbaSChristoph Hellwig * end of the current map or find the current map invalid, grab a new 1325598ecfbaSChristoph Hellwig * one. 1326598ecfbaSChristoph Hellwig */ 1327598ecfbaSChristoph Hellwig for (i = 0, file_offset = page_offset(page); 1328598ecfbaSChristoph Hellwig i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset; 1329598ecfbaSChristoph Hellwig i++, file_offset += len) { 1330598ecfbaSChristoph Hellwig if (iop && !test_bit(i, iop->uptodate)) 1331598ecfbaSChristoph Hellwig continue; 1332598ecfbaSChristoph Hellwig 1333598ecfbaSChristoph Hellwig error = wpc->ops->map_blocks(wpc, inode, file_offset); 1334598ecfbaSChristoph Hellwig if (error) 1335598ecfbaSChristoph Hellwig break; 13363e19e6f3SChristoph Hellwig if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE)) 13373e19e6f3SChristoph Hellwig continue; 1338598ecfbaSChristoph Hellwig if (wpc->iomap.type == IOMAP_HOLE) 1339598ecfbaSChristoph Hellwig continue; 1340598ecfbaSChristoph Hellwig iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc, 1341598ecfbaSChristoph Hellwig &submit_list); 1342598ecfbaSChristoph Hellwig count++; 1343598ecfbaSChristoph Hellwig } 1344598ecfbaSChristoph Hellwig 1345598ecfbaSChristoph Hellwig WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list)); 1346598ecfbaSChristoph Hellwig WARN_ON_ONCE(!PageLocked(page)); 1347598ecfbaSChristoph Hellwig WARN_ON_ONCE(PageWriteback(page)); 134850e7d6c7SBrian Foster WARN_ON_ONCE(PageDirty(page)); 1349598ecfbaSChristoph Hellwig 1350598ecfbaSChristoph Hellwig /* 1351598ecfbaSChristoph Hellwig * We cannot cancel the ioend directly here on error. We may have 1352598ecfbaSChristoph Hellwig * already set other pages under writeback and hence we have to run I/O 1353598ecfbaSChristoph Hellwig * completion to mark the error state of the pages under writeback 1354598ecfbaSChristoph Hellwig * appropriately. 1355598ecfbaSChristoph Hellwig */ 1356598ecfbaSChristoph Hellwig if (unlikely(error)) { 1357598ecfbaSChristoph Hellwig /* 1358763e4cdcSBrian Foster * Let the filesystem know what portion of the current page 1359f1f264b4SAndreas Gruenbacher * failed to map. If the page hasn't been added to ioend, it 1360763e4cdcSBrian Foster * won't be affected by I/O completion and we must unlock it 1361763e4cdcSBrian Foster * now. 
1362598ecfbaSChristoph Hellwig */ 1363598ecfbaSChristoph Hellwig if (wpc->ops->discard_page) 1364763e4cdcSBrian Foster wpc->ops->discard_page(page, file_offset); 1365763e4cdcSBrian Foster if (!count) { 1366598ecfbaSChristoph Hellwig ClearPageUptodate(page); 1367598ecfbaSChristoph Hellwig unlock_page(page); 1368598ecfbaSChristoph Hellwig goto done; 1369598ecfbaSChristoph Hellwig } 1370598ecfbaSChristoph Hellwig } 1371598ecfbaSChristoph Hellwig 137250e7d6c7SBrian Foster set_page_writeback(page); 1373598ecfbaSChristoph Hellwig unlock_page(page); 1374598ecfbaSChristoph Hellwig 1375598ecfbaSChristoph Hellwig /* 1376f1f264b4SAndreas Gruenbacher * Preserve the original error if there was one; catch 1377598ecfbaSChristoph Hellwig * submission errors here and propagate into subsequent ioend 1378598ecfbaSChristoph Hellwig * submissions. 1379598ecfbaSChristoph Hellwig */ 1380598ecfbaSChristoph Hellwig list_for_each_entry_safe(ioend, next, &submit_list, io_list) { 1381598ecfbaSChristoph Hellwig int error2; 1382598ecfbaSChristoph Hellwig 1383598ecfbaSChristoph Hellwig list_del_init(&ioend->io_list); 1384598ecfbaSChristoph Hellwig error2 = iomap_submit_ioend(wpc, ioend, error); 1385598ecfbaSChristoph Hellwig if (error2 && !error) 1386598ecfbaSChristoph Hellwig error = error2; 1387598ecfbaSChristoph Hellwig } 1388598ecfbaSChristoph Hellwig 1389598ecfbaSChristoph Hellwig /* 1390598ecfbaSChristoph Hellwig * We can end up here with no error and nothing to write only if we race 1391598ecfbaSChristoph Hellwig * with a partial page truncate on a sub-page block sized filesystem. 1392598ecfbaSChristoph Hellwig */ 1393598ecfbaSChristoph Hellwig if (!count) 1394598ecfbaSChristoph Hellwig end_page_writeback(page); 1395598ecfbaSChristoph Hellwig done: 1396598ecfbaSChristoph Hellwig mapping_set_error(page->mapping, error); 1397598ecfbaSChristoph Hellwig return error; 1398598ecfbaSChristoph Hellwig } 1399598ecfbaSChristoph Hellwig 1400598ecfbaSChristoph Hellwig /* 1401598ecfbaSChristoph Hellwig * Write out a dirty page. 1402598ecfbaSChristoph Hellwig * 1403f1f264b4SAndreas Gruenbacher * For delalloc space on the page, we need to allocate space and flush it. 1404f1f264b4SAndreas Gruenbacher * For unwritten space on the page, we need to start the conversion to 1405598ecfbaSChristoph Hellwig * regular allocated space. 1406598ecfbaSChristoph Hellwig */ 1407598ecfbaSChristoph Hellwig static int 1408598ecfbaSChristoph Hellwig iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data) 1409598ecfbaSChristoph Hellwig { 1410598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc = data; 1411598ecfbaSChristoph Hellwig struct inode *inode = page->mapping->host; 1412598ecfbaSChristoph Hellwig pgoff_t end_index; 1413598ecfbaSChristoph Hellwig u64 end_offset; 1414598ecfbaSChristoph Hellwig loff_t offset; 1415598ecfbaSChristoph Hellwig 14161ac99452SMatthew Wilcox (Oracle) trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE); 1417598ecfbaSChristoph Hellwig 1418598ecfbaSChristoph Hellwig /* 1419f1f264b4SAndreas Gruenbacher * Refuse to write the page out if we're called from reclaim context. 1420598ecfbaSChristoph Hellwig * 1421598ecfbaSChristoph Hellwig * This avoids stack overflows when called from deeply used stacks in 1422598ecfbaSChristoph Hellwig * random callers for direct reclaim or memcg reclaim. We explicitly 1423598ecfbaSChristoph Hellwig * allow reclaim from kswapd as the stack usage there is relatively low. 
1424598ecfbaSChristoph Hellwig * 1425598ecfbaSChristoph Hellwig * This should never happen except in the case of a VM regression so 1426598ecfbaSChristoph Hellwig * warn about it. 1427598ecfbaSChristoph Hellwig */ 1428598ecfbaSChristoph Hellwig if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == 1429598ecfbaSChristoph Hellwig PF_MEMALLOC)) 1430598ecfbaSChristoph Hellwig goto redirty; 1431598ecfbaSChristoph Hellwig 1432598ecfbaSChristoph Hellwig /* 1433598ecfbaSChristoph Hellwig * Is this page beyond the end of the file? 1434598ecfbaSChristoph Hellwig * 1435598ecfbaSChristoph Hellwig * The page index is less than the end_index, adjust the end_offset 1436598ecfbaSChristoph Hellwig * to the highest offset that this page should represent. 1437598ecfbaSChristoph Hellwig * ----------------------------------------------------- 1438598ecfbaSChristoph Hellwig * | file mapping | <EOF> | 1439598ecfbaSChristoph Hellwig * ----------------------------------------------------- 1440598ecfbaSChristoph Hellwig * | Page ... | Page N-2 | Page N-1 | Page N | | 1441598ecfbaSChristoph Hellwig * ^--------------------------------^----------|-------- 1442598ecfbaSChristoph Hellwig * | desired writeback range | see else | 1443598ecfbaSChristoph Hellwig * ---------------------------------^------------------| 1444598ecfbaSChristoph Hellwig */ 1445598ecfbaSChristoph Hellwig offset = i_size_read(inode); 1446598ecfbaSChristoph Hellwig end_index = offset >> PAGE_SHIFT; 1447598ecfbaSChristoph Hellwig if (page->index < end_index) 1448598ecfbaSChristoph Hellwig end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT; 1449598ecfbaSChristoph Hellwig else { 1450598ecfbaSChristoph Hellwig /* 1451598ecfbaSChristoph Hellwig * Check whether the page to write out is beyond or straddles 1452598ecfbaSChristoph Hellwig * i_size or not. 1453598ecfbaSChristoph Hellwig * ------------------------------------------------------- 1454598ecfbaSChristoph Hellwig * | file mapping | <EOF> | 1455598ecfbaSChristoph Hellwig * ------------------------------------------------------- 1456598ecfbaSChristoph Hellwig * | Page ... | Page N-2 | Page N-1 | Page N | Beyond | 1457598ecfbaSChristoph Hellwig * ^--------------------------------^-----------|--------- 1458598ecfbaSChristoph Hellwig * | | Straddles | 1459598ecfbaSChristoph Hellwig * ---------------------------------^-----------|--------| 1460598ecfbaSChristoph Hellwig */ 1461598ecfbaSChristoph Hellwig unsigned offset_into_page = offset & (PAGE_SIZE - 1); 1462598ecfbaSChristoph Hellwig 1463598ecfbaSChristoph Hellwig /* 1464f1f264b4SAndreas Gruenbacher * Skip the page if it's fully outside i_size, e.g. due to a 1465f1f264b4SAndreas Gruenbacher * truncate operation that's in progress. We must redirty the 1466598ecfbaSChristoph Hellwig * page so that reclaim stops reclaiming it. Otherwise 1467598ecfbaSChristoph Hellwig * iomap_vm_releasepage() is called on it and gets confused. 1468598ecfbaSChristoph Hellwig * 1469f1f264b4SAndreas Gruenbacher * Note that the end_index is unsigned long. If the given 1470f1f264b4SAndreas Gruenbacher * offset is greater than 16TB on a 32-bit system then if we 1471f1f264b4SAndreas Gruenbacher * checked if the page is fully outside i_size with 1472f1f264b4SAndreas Gruenbacher * "if (page->index >= end_index + 1)", "end_index + 1" would 1473f1f264b4SAndreas Gruenbacher * overflow and evaluate to 0. 
Hence this page would be 1474f1f264b4SAndreas Gruenbacher * redirtied and written out repeatedly, which would result in 1475f1f264b4SAndreas Gruenbacher * an infinite loop; the user program performing this operation 1476f1f264b4SAndreas Gruenbacher * would hang. Instead, we can detect this situation by 1477f1f264b4SAndreas Gruenbacher * checking if the page is totally beyond i_size or if its 1478598ecfbaSChristoph Hellwig * offset is just equal to the EOF. 1479598ecfbaSChristoph Hellwig */ 1480598ecfbaSChristoph Hellwig if (page->index > end_index || 1481598ecfbaSChristoph Hellwig (page->index == end_index && offset_into_page == 0)) 1482598ecfbaSChristoph Hellwig goto redirty; 1483598ecfbaSChristoph Hellwig 1484598ecfbaSChristoph Hellwig /* 1485598ecfbaSChristoph Hellwig * The page straddles i_size. It must be zeroed out on each 1486598ecfbaSChristoph Hellwig * and every writepage invocation because it may be mmapped. 1487598ecfbaSChristoph Hellwig * "A file is mapped in multiples of the page size. For a file 1488598ecfbaSChristoph Hellwig * that is not a multiple of the page size, the remaining 1489598ecfbaSChristoph Hellwig * memory is zeroed when mapped, and writes to that region are 1490598ecfbaSChristoph Hellwig * not written out to the file." 1491598ecfbaSChristoph Hellwig */ 1492598ecfbaSChristoph Hellwig zero_user_segment(page, offset_into_page, PAGE_SIZE); 1493598ecfbaSChristoph Hellwig 1494598ecfbaSChristoph Hellwig /* Adjust the end_offset to the end of file */ 1495598ecfbaSChristoph Hellwig end_offset = offset; 1496598ecfbaSChristoph Hellwig } 1497598ecfbaSChristoph Hellwig 1498598ecfbaSChristoph Hellwig return iomap_writepage_map(wpc, wbc, inode, page, end_offset); 1499598ecfbaSChristoph Hellwig 1500598ecfbaSChristoph Hellwig redirty: 1501598ecfbaSChristoph Hellwig redirty_page_for_writepage(wbc, page); 1502598ecfbaSChristoph Hellwig unlock_page(page); 1503598ecfbaSChristoph Hellwig return 0; 1504598ecfbaSChristoph Hellwig } 1505598ecfbaSChristoph Hellwig 1506598ecfbaSChristoph Hellwig int 1507598ecfbaSChristoph Hellwig iomap_writepage(struct page *page, struct writeback_control *wbc, 1508598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc, 1509598ecfbaSChristoph Hellwig const struct iomap_writeback_ops *ops) 1510598ecfbaSChristoph Hellwig { 1511598ecfbaSChristoph Hellwig int ret; 1512598ecfbaSChristoph Hellwig 1513598ecfbaSChristoph Hellwig wpc->ops = ops; 1514598ecfbaSChristoph Hellwig ret = iomap_do_writepage(page, wbc, wpc); 1515598ecfbaSChristoph Hellwig if (!wpc->ioend) 1516598ecfbaSChristoph Hellwig return ret; 1517598ecfbaSChristoph Hellwig return iomap_submit_ioend(wpc, wpc->ioend, ret); 1518598ecfbaSChristoph Hellwig } 1519598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_writepage); 1520598ecfbaSChristoph Hellwig 1521598ecfbaSChristoph Hellwig int 1522598ecfbaSChristoph Hellwig iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, 1523598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc, 1524598ecfbaSChristoph Hellwig const struct iomap_writeback_ops *ops) 1525598ecfbaSChristoph Hellwig { 1526598ecfbaSChristoph Hellwig int ret; 1527598ecfbaSChristoph Hellwig 1528598ecfbaSChristoph Hellwig wpc->ops = ops; 1529598ecfbaSChristoph Hellwig ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc); 1530598ecfbaSChristoph Hellwig if (!wpc->ioend) 1531598ecfbaSChristoph Hellwig return ret; 1532598ecfbaSChristoph Hellwig return iomap_submit_ioend(wpc, wpc->ioend, ret); 1533598ecfbaSChristoph Hellwig } 1534598ecfbaSChristoph Hellwig 
EXPORT_SYMBOL_GPL(iomap_writepages); 1535598ecfbaSChristoph Hellwig 1536598ecfbaSChristoph Hellwig static int __init iomap_init(void) 1537598ecfbaSChristoph Hellwig { 1538598ecfbaSChristoph Hellwig return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE), 1539598ecfbaSChristoph Hellwig offsetof(struct iomap_ioend, io_inline_bio), 1540598ecfbaSChristoph Hellwig BIOSET_NEED_BVECS); 1541598ecfbaSChristoph Hellwig } 1542598ecfbaSChristoph Hellwig fs_initcall(iomap_init); 1543
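/*
 * Illustrative sketch only (not part of this file): one plausible way a
 * filesystem could wire its ->writepages method into the writeback machinery
 * above.  Everything named "myfs_*", the embedded context layout, and the
 * myfs_lookup_extent() helper are hypothetical assumptions for illustration;
 * only iomap_writepages(), struct iomap_writepage_ctx, and the
 * iomap_writeback_ops callbacks invoked by iomap_writepage_map() and
 * iomap_submit_ioend() above are taken from this code.
 */
struct myfs_writepage_ctx {
	struct iomap_writepage_ctx ctx;	/* must be first so container_of() works */
	/* per-writeback filesystem state (e.g. a cached extent) could go here */
};

static int myfs_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset)
{
	/*
	 * Fill wpc->iomap with a mapping that covers @offset.
	 * iomap_writepage_map() calls this for each block and reuses the
	 * cached mapping until it no longer covers the current offset.
	 * myfs_lookup_extent() is a hypothetical helper standing in for the
	 * filesystem's own extent lookup/allocation.
	 */
	return myfs_lookup_extent(inode, offset, &wpc->iomap);
}

static int myfs_prepare_ioend(struct iomap_ioend *ioend, int status)
{
	/*
	 * Last chance to adjust the ioend (e.g. override io_bio->bi_end_io or
	 * attach a transaction) before iomap_submit_ioend() submits or fails
	 * the bio.  Returning non-zero fails the whole ioend.
	 */
	return status;
}

static void myfs_discard_page(struct page *page, loff_t fileoff)
{
	/*
	 * Called from iomap_writepage_map() when mapping a block failed;
	 * a real filesystem would punch out delalloc reservations from
	 * @fileoff to the end of the page here.
	 */
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
	.map_blocks	= myfs_map_blocks,
	.prepare_ioend	= myfs_prepare_ioend,
	.discard_page	= myfs_discard_page,
};

/* Suitable for wiring into myfs's address_space_operations ->writepages. */
static int myfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct myfs_writepage_ctx wpc = { };

	/*
	 * iomap_writepages() walks the dirty pages, builds ioends through the
	 * callbacks above, and submits the final cached ioend on return.
	 */
	return iomap_writepages(mapping, wbc, &wpc.ctx, &myfs_writeback_ops);
}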