// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

/*
 * Structure allocated for each folio when block size < folio size
 * to track sub-folio uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];
};

static inline struct iomap_page *to_iomap_page(struct folio *folio)
{
	if (folio_test_private(folio))
		return folio_get_private(folio);
	return NULL;
}

static struct bio_set iomap_ioend_bioset;

static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);
	unsigned int nr_blocks = i_blocks_per_page(inode, page);

	if (iop || nr_blocks <= 1)
		return iop;

	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
			GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&iop->uptodate_lock);
	if (PageUptodate(page))
		bitmap_fill(iop->uptodate, nr_blocks);
	attach_page_private(page, iop);
	return iop;
}
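
/*
 * Free the per-page state attached by iomap_page_create() when the page is
 * torn down.  By this point all tracked reads and writes must have completed
 * and the uptodate bitmap must agree with the page's uptodate flag.
 */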
static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = detach_page_private(page);
	unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			PageUptodate(page));
	kfree(iop);
}

/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

static void
iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned long flags;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, last - first + 1);
	if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
		SetPageUptodate(page);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	if (PageError(page))
		return;

	if (page_has_private(page))
		iomap_iop_set_range_uptodate(page, off, len);
	else
		SetPageUptodate(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
		unlock_page(page);
}
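
/*
 * Read completion handler: mark each segment of the bio uptodate (or flag
 * the page with an error), drop the read_bytes_pending counts taken when
 * the bio was built, and unlock pages whose reads have all finished.
 */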
static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @page: page to copy to
 *
 * Copy the inline data in @iter into @page and zero out the rest of the page.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct page *page)
{
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	void *addr;

	if (PageUptodate(page))
		return 0;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			 offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (poff > 0)
		iomap_page_create(iter->inode, page);

	addr = kmap_local_page(page) + poff;
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff);
	return 0;
}

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}
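
/*
 * Read in the part of a page covered by the current mapping.  Inline extents
 * are copied directly, blocks that iomap_block_needs_zeroing() flags (holes,
 * unwritten or freshly allocated extents, and post-EOF blocks) are zeroed in
 * the page cache, and everything else is added to ctx->bio, growing or
 * submitting the bio as needed.  Returns the number of bytes processed.
 */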
static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, page);

	/* zero post-eof blocks as the page may be mapped */
	iop = iomap_page_create(iter->inode, page);
	iomap_adjust_read_range(iter->inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;
	if (iop)
		atomic_add(plen, &iop->read_bytes_pending);

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    bio_add_page(ctx->bio, page, plen, poff) != plen) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_readpage does.
		 */
		if (!ctx->bio)
			ctx->bio = bio_alloc(orig_gfp, 1);
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
		__bio_add_page(ctx->bio, page, plen, poff);
	}
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= page->mapping->host,
		.pos		= page_offset(page),
		.len		= PAGE_SIZE,
	};
	struct iomap_readpage_ctx ctx = {
		.cur_page	= page,
	};
	int ret;

	trace_iomap_readpage(page->mapping->host, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		SetPageError(page);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readahead and block_read_full_page, we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);

static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(iter->pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = readahead_page(ctx->rac);
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	trace_iomap_releasepage(page->mapping->host, page_offset(page),
			PAGE_SIZE);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	trace_iomap_invalidatepage(page->mapping->host, offset, len);

	/*
	 * If we're invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page))
		attach_page_private(newpage, detach_page_private(page));

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
		unsigned plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}
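
/*
 * Prepare the blocks covered by a buffered write: any block that is not
 * going to be completely overwritten and is not yet uptodate is either
 * zeroed (when the mapping needs zeroing anyway) or read in synchronously,
 * so that the copy-in cannot expose stale data.
 */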
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		unsigned len, struct page *page)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_page *iop = iomap_page_create(iter->inode, page);
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;

	if (PageUptodate(page))
		return 0;
	ClearPageError(page);

	do {
		iomap_adjust_read_range(iter->inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			zero_user_segments(page, poff, from, to, poff + plen);
		} else {
			int status = iomap_read_page_sync(block_start, page,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(page, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct page *page)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, page);
}
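
/*
 * Find (or create) and lock the page backing a write at @pos/@len and get
 * it ready for data to be copied in, dispatching to the inline,
 * buffer_head, or native iomap preparation paths.  On success the locked
 * page is returned in @pagep; on failure ->page_done is called with a zero
 * length so the filesystem can unwind any ->page_prepare work.
 */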
static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		unsigned len, struct page **pagep)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct page *page;
	struct folio *folio;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(iter->inode, pos, len);
		if (status)
			return status;
	}

	page = grab_cache_page_write_begin(iter->inode->i_mapping,
				pos >> PAGE_SHIFT, AOP_FLAG_NOFS);
	if (!page) {
		status = -ENOMEM;
		goto out_no_page;
	}
	folio = page_folio(page);

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, page);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, page);

	if (unlikely(status))
		goto out_unlock;

	*pagep = page;
	return 0;

out_unlock:
	unlock_page(page);
	put_page(page);
	iomap_write_failed(iter->inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, 0, NULL);
	return status;
}

static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct page *page)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page)))
		return 0;
	iomap_set_range_uptodate(page, offset_in_page(pos), len);
	__set_page_dirty_nobuffers(page);
	return copied;
}
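
/* Copy a completed write back into the inline data area of the mapping. */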
static size_t iomap_write_end_inline(const struct iomap_iter *iter,
		struct page *page, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_page(page);
	addr = kmap_local_page(page) + pos;
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct page *page)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, page, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, page);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	unlock_page(page);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, ret, page);
	put_page(page);

	if (ret < len)
		iomap_write_failed(iter->inode, pos, len);
	return ret;
}
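
/*
 * Copy user data into the page cache for the single mapping returned by
 * iomap_iter().  Each pass faults in the source pages, prepares the target
 * page with iomap_write_begin(), copies the data, and dirties the page via
 * iomap_write_end(); a short copy shrinks the request and retries.
 */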
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &page);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(iter->inode->i_mapping))
			flush_dcache_page(page);

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);

		status = iomap_write_end(iter, pos, bytes, copied, page);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			goto again;
		}
		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);
	if (iter.pos == iocb->ki_pos)
		return ret;
	return iter.pos - iocb->ki_pos;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
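
/*
 * Break block sharing for one mapping by rewriting the existing data in
 * place: iomap_write_begin() reads the page up to date and iomap_write_end()
 * immediately dirties it again.  Only IOMAP_F_SHARED mappings are touched;
 * holes and unwritten extents are skipped.
 */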
static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	long status = 0;
	loff_t written = 0;

	/* don't bother with blocks that are not shared to start with */
	if (!(iomap->flags & IOMAP_F_SHARED))
		return length;
	/* don't bother with holes or unwritten extents */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned long offset = offset_in_page(pos);
		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
		struct page *page;

		status = iomap_write_begin(iter, pos, bytes, &page);
		if (unlikely(status))
			return status;

		status = iomap_write_end(iter, pos, bytes, bytes, page);
		if (WARN_ON_ONCE(status == 0))
			return -EIO;

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
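
/* Zero part of a single page-cache page through write_begin/write_end. */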
static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
{
	struct page *page;
	int status;
	unsigned offset = offset_in_page(pos);
	unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);

	status = iomap_write_begin(iter, pos, bytes, &page);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(iter, pos, bytes, bytes, page);
}

static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		s64 bytes;

		if (IS_DAX(iter->inode))
			bytes = dax_iomap_zero(pos, length, iomap);
		else
			bytes = __iomap_zero_iter(iter, pos, length);
		if (bytes < 0)
			return bytes;

		pos += bytes;
		length -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (length > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
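
/*
 * Mark the page backing a write fault dirty for one mapping.  The
 * buffer_head path re-runs block preparation so the buffers get dirtied;
 * the native iomap path only needs to dirty the already-uptodate page.
 */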
static loff_t iomap_page_mkwrite_iter(struct iomap_iter *iter,
		struct page *page)
{
	struct folio *folio = page_folio(page);
	loff_t length = iomap_length(iter);
	int ret;

	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
					      &iter->iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		set_page_dirty(page);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= file_inode(vmf->vma->vm_file),
		.flags		= IOMAP_WRITE | IOMAP_FAULT,
	};
	struct page *page = vmf->page;
	ssize_t ret;

	lock_page(page);
	ret = page_mkwrite_check_truncate(page, iter.inode);
	if (ret < 0)
		goto out_unlock;
	iter.pos = page_offset(page);
	iter.len = ret;
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_page_mkwrite_iter(&iter, page);

	if (ret < 0)
		goto out_unlock;
	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
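
/*
 * Writeback completion for one page of an ioend: record any error against
 * the mapping and end page writeback once every byte range submitted for
 * the page has completed.
 */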
static void
iomap_finish_page_writeback(struct inode *inode, struct page *page,
		int error, unsigned int len)
{
	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);

	if (error) {
		SetPageError(page);
		mapping_set_error(inode->i_mapping, error);
	}

	WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);

	if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
		end_page_writeback(page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static void
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_inline_bio;
	struct bio *last = ioend->io_bio, *next;
	u64 start = bio->bi_iter.bi_sector;
	loff_t offset = ioend->io_offset;
	bool quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bv, bio, iter_all)
			iomap_finish_page_writeback(inode, bv->bv_page, error,
					bv->bv_len);
		bio_put(bio);
	}
	/* The ioend has been freed by bio_put() */

	if (unlikely(error && !quiet)) {
		printk_ratelimited(KERN_ERR
Wong "%s: writeback error on inode %lu, offset %lld, sector %llu", 1066c275779fSZorro Lang inode->i_sb->s_id, inode->i_ino, offset, start); 1067598ecfbaSChristoph Hellwig } 1068598ecfbaSChristoph Hellwig } 1069598ecfbaSChristoph Hellwig 1070598ecfbaSChristoph Hellwig void 1071598ecfbaSChristoph Hellwig iomap_finish_ioends(struct iomap_ioend *ioend, int error) 1072598ecfbaSChristoph Hellwig { 1073598ecfbaSChristoph Hellwig struct list_head tmp; 1074598ecfbaSChristoph Hellwig 1075598ecfbaSChristoph Hellwig list_replace_init(&ioend->io_list, &tmp); 1076598ecfbaSChristoph Hellwig iomap_finish_ioend(ioend, error); 1077598ecfbaSChristoph Hellwig 1078598ecfbaSChristoph Hellwig while (!list_empty(&tmp)) { 1079598ecfbaSChristoph Hellwig ioend = list_first_entry(&tmp, struct iomap_ioend, io_list); 1080598ecfbaSChristoph Hellwig list_del_init(&ioend->io_list); 1081598ecfbaSChristoph Hellwig iomap_finish_ioend(ioend, error); 1082598ecfbaSChristoph Hellwig } 1083598ecfbaSChristoph Hellwig } 1084598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_finish_ioends); 1085598ecfbaSChristoph Hellwig 1086598ecfbaSChristoph Hellwig /* 1087598ecfbaSChristoph Hellwig * We can merge two adjacent ioends if they have the same set of work to do. 1088598ecfbaSChristoph Hellwig */ 1089598ecfbaSChristoph Hellwig static bool 1090598ecfbaSChristoph Hellwig iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next) 1091598ecfbaSChristoph Hellwig { 1092598ecfbaSChristoph Hellwig if (ioend->io_bio->bi_status != next->io_bio->bi_status) 1093598ecfbaSChristoph Hellwig return false; 1094598ecfbaSChristoph Hellwig if ((ioend->io_flags & IOMAP_F_SHARED) ^ 1095598ecfbaSChristoph Hellwig (next->io_flags & IOMAP_F_SHARED)) 1096598ecfbaSChristoph Hellwig return false; 1097598ecfbaSChristoph Hellwig if ((ioend->io_type == IOMAP_UNWRITTEN) ^ 1098598ecfbaSChristoph Hellwig (next->io_type == IOMAP_UNWRITTEN)) 1099598ecfbaSChristoph Hellwig return false; 1100598ecfbaSChristoph Hellwig if (ioend->io_offset + ioend->io_size != next->io_offset) 1101598ecfbaSChristoph Hellwig return false; 1102598ecfbaSChristoph Hellwig return true; 1103598ecfbaSChristoph Hellwig } 1104598ecfbaSChristoph Hellwig 1105598ecfbaSChristoph Hellwig void 11066e552494SBrian Foster iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends) 1107598ecfbaSChristoph Hellwig { 1108598ecfbaSChristoph Hellwig struct iomap_ioend *next; 1109598ecfbaSChristoph Hellwig 1110598ecfbaSChristoph Hellwig INIT_LIST_HEAD(&ioend->io_list); 1111598ecfbaSChristoph Hellwig 1112598ecfbaSChristoph Hellwig while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend, 1113598ecfbaSChristoph Hellwig io_list))) { 1114598ecfbaSChristoph Hellwig if (!iomap_ioend_can_merge(ioend, next)) 1115598ecfbaSChristoph Hellwig break; 1116598ecfbaSChristoph Hellwig list_move_tail(&next->io_list, &ioend->io_list); 1117598ecfbaSChristoph Hellwig ioend->io_size += next->io_size; 1118598ecfbaSChristoph Hellwig } 1119598ecfbaSChristoph Hellwig } 1120598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_ioend_try_merge); 1121598ecfbaSChristoph Hellwig 1122598ecfbaSChristoph Hellwig static int 11234f0f586bSSami Tolvanen iomap_ioend_compare(void *priv, const struct list_head *a, 11244f0f586bSSami Tolvanen const struct list_head *b) 1125598ecfbaSChristoph Hellwig { 1126b3d423ecSChristoph Hellwig struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list); 1127b3d423ecSChristoph Hellwig struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list); 
1128598ecfbaSChristoph Hellwig 1129598ecfbaSChristoph Hellwig if (ia->io_offset < ib->io_offset) 1130598ecfbaSChristoph Hellwig return -1; 1131b3d423ecSChristoph Hellwig if (ia->io_offset > ib->io_offset) 1132598ecfbaSChristoph Hellwig return 1; 1133598ecfbaSChristoph Hellwig return 0; 1134598ecfbaSChristoph Hellwig } 1135598ecfbaSChristoph Hellwig 1136598ecfbaSChristoph Hellwig void 1137598ecfbaSChristoph Hellwig iomap_sort_ioends(struct list_head *ioend_list) 1138598ecfbaSChristoph Hellwig { 1139598ecfbaSChristoph Hellwig list_sort(NULL, ioend_list, iomap_ioend_compare); 1140598ecfbaSChristoph Hellwig } 1141598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_sort_ioends); 1142598ecfbaSChristoph Hellwig 1143598ecfbaSChristoph Hellwig static void iomap_writepage_end_bio(struct bio *bio) 1144598ecfbaSChristoph Hellwig { 1145598ecfbaSChristoph Hellwig struct iomap_ioend *ioend = bio->bi_private; 1146598ecfbaSChristoph Hellwig 1147598ecfbaSChristoph Hellwig iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status)); 1148598ecfbaSChristoph Hellwig } 1149598ecfbaSChristoph Hellwig 1150598ecfbaSChristoph Hellwig /* 1151598ecfbaSChristoph Hellwig * Submit the final bio for an ioend. 1152598ecfbaSChristoph Hellwig * 1153598ecfbaSChristoph Hellwig * If @error is non-zero, it means that we have a situation where some part of 1154f1f264b4SAndreas Gruenbacher * the submission process has failed after we've marked pages for writeback 1155598ecfbaSChristoph Hellwig * and unlocked them. In this situation, we need to fail the bio instead of 1156598ecfbaSChristoph Hellwig * submitting it. This typically only happens on a filesystem shutdown. 1157598ecfbaSChristoph Hellwig */ 1158598ecfbaSChristoph Hellwig static int 1159598ecfbaSChristoph Hellwig iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend, 1160598ecfbaSChristoph Hellwig int error) 1161598ecfbaSChristoph Hellwig { 1162598ecfbaSChristoph Hellwig ioend->io_bio->bi_private = ioend; 1163598ecfbaSChristoph Hellwig ioend->io_bio->bi_end_io = iomap_writepage_end_bio; 1164598ecfbaSChristoph Hellwig 1165598ecfbaSChristoph Hellwig if (wpc->ops->prepare_ioend) 1166598ecfbaSChristoph Hellwig error = wpc->ops->prepare_ioend(ioend, error); 1167598ecfbaSChristoph Hellwig if (error) { 1168598ecfbaSChristoph Hellwig /* 1169f1f264b4SAndreas Gruenbacher * If we're failing the IO now, just mark the ioend with an 1170598ecfbaSChristoph Hellwig * error and finish it. This will run IO completion immediately 1171598ecfbaSChristoph Hellwig * as there is only one reference to the ioend at this point in 1172598ecfbaSChristoph Hellwig * time. 
1173598ecfbaSChristoph Hellwig */ 1174598ecfbaSChristoph Hellwig ioend->io_bio->bi_status = errno_to_blk_status(error); 1175598ecfbaSChristoph Hellwig bio_endio(ioend->io_bio); 1176598ecfbaSChristoph Hellwig return error; 1177598ecfbaSChristoph Hellwig } 1178598ecfbaSChristoph Hellwig 1179598ecfbaSChristoph Hellwig submit_bio(ioend->io_bio); 1180598ecfbaSChristoph Hellwig return 0; 1181598ecfbaSChristoph Hellwig } 1182598ecfbaSChristoph Hellwig 1183598ecfbaSChristoph Hellwig static struct iomap_ioend * 1184598ecfbaSChristoph Hellwig iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, 1185598ecfbaSChristoph Hellwig loff_t offset, sector_t sector, struct writeback_control *wbc) 1186598ecfbaSChristoph Hellwig { 1187598ecfbaSChristoph Hellwig struct iomap_ioend *ioend; 1188598ecfbaSChristoph Hellwig struct bio *bio; 1189598ecfbaSChristoph Hellwig 1190a8affc03SChristoph Hellwig bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset); 1191598ecfbaSChristoph Hellwig bio_set_dev(bio, wpc->iomap.bdev); 1192598ecfbaSChristoph Hellwig bio->bi_iter.bi_sector = sector; 1193598ecfbaSChristoph Hellwig bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc); 1194598ecfbaSChristoph Hellwig bio->bi_write_hint = inode->i_write_hint; 1195598ecfbaSChristoph Hellwig wbc_init_bio(wbc, bio); 1196598ecfbaSChristoph Hellwig 1197598ecfbaSChristoph Hellwig ioend = container_of(bio, struct iomap_ioend, io_inline_bio); 1198598ecfbaSChristoph Hellwig INIT_LIST_HEAD(&ioend->io_list); 1199598ecfbaSChristoph Hellwig ioend->io_type = wpc->iomap.type; 1200598ecfbaSChristoph Hellwig ioend->io_flags = wpc->iomap.flags; 1201598ecfbaSChristoph Hellwig ioend->io_inode = inode; 1202598ecfbaSChristoph Hellwig ioend->io_size = 0; 1203598ecfbaSChristoph Hellwig ioend->io_offset = offset; 1204598ecfbaSChristoph Hellwig ioend->io_bio = bio; 1205598ecfbaSChristoph Hellwig return ioend; 1206598ecfbaSChristoph Hellwig } 1207598ecfbaSChristoph Hellwig 1208598ecfbaSChristoph Hellwig /* 1209598ecfbaSChristoph Hellwig * Allocate a new bio, and chain the old bio to the new one. 1210598ecfbaSChristoph Hellwig * 1211f1f264b4SAndreas Gruenbacher * Note that we have to perform the chaining in this unintuitive order 1212598ecfbaSChristoph Hellwig * so that the bi_private linkage is set up in the right direction for the 1213598ecfbaSChristoph Hellwig * traversal in iomap_finish_ioend(). 
1214598ecfbaSChristoph Hellwig */ 1215598ecfbaSChristoph Hellwig static struct bio * 1216598ecfbaSChristoph Hellwig iomap_chain_bio(struct bio *prev) 1217598ecfbaSChristoph Hellwig { 1218598ecfbaSChristoph Hellwig struct bio *new; 1219598ecfbaSChristoph Hellwig 1220a8affc03SChristoph Hellwig new = bio_alloc(GFP_NOFS, BIO_MAX_VECS); 1221598ecfbaSChristoph Hellwig bio_copy_dev(new, prev);/* also copies over blkcg information */ 1222598ecfbaSChristoph Hellwig new->bi_iter.bi_sector = bio_end_sector(prev); 1223598ecfbaSChristoph Hellwig new->bi_opf = prev->bi_opf; 1224598ecfbaSChristoph Hellwig new->bi_write_hint = prev->bi_write_hint; 1225598ecfbaSChristoph Hellwig 1226598ecfbaSChristoph Hellwig bio_chain(prev, new); 1227598ecfbaSChristoph Hellwig bio_get(prev); /* for iomap_finish_ioend */ 1228598ecfbaSChristoph Hellwig submit_bio(prev); 1229598ecfbaSChristoph Hellwig return new; 1230598ecfbaSChristoph Hellwig } 1231598ecfbaSChristoph Hellwig 1232598ecfbaSChristoph Hellwig static bool 1233598ecfbaSChristoph Hellwig iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset, 1234598ecfbaSChristoph Hellwig sector_t sector) 1235598ecfbaSChristoph Hellwig { 1236598ecfbaSChristoph Hellwig if ((wpc->iomap.flags & IOMAP_F_SHARED) != 1237598ecfbaSChristoph Hellwig (wpc->ioend->io_flags & IOMAP_F_SHARED)) 1238598ecfbaSChristoph Hellwig return false; 1239598ecfbaSChristoph Hellwig if (wpc->iomap.type != wpc->ioend->io_type) 1240598ecfbaSChristoph Hellwig return false; 1241598ecfbaSChristoph Hellwig if (offset != wpc->ioend->io_offset + wpc->ioend->io_size) 1242598ecfbaSChristoph Hellwig return false; 1243598ecfbaSChristoph Hellwig if (sector != bio_end_sector(wpc->ioend->io_bio)) 1244598ecfbaSChristoph Hellwig return false; 1245598ecfbaSChristoph Hellwig return true; 1246598ecfbaSChristoph Hellwig } 1247598ecfbaSChristoph Hellwig 1248598ecfbaSChristoph Hellwig /* 1249598ecfbaSChristoph Hellwig * Test to see if we have an existing ioend structure that we could append to 1250f1f264b4SAndreas Gruenbacher * first; otherwise finish off the current ioend and start another. 
1251598ecfbaSChristoph Hellwig */ 1252598ecfbaSChristoph Hellwig static void 1253598ecfbaSChristoph Hellwig iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page, 1254598ecfbaSChristoph Hellwig struct iomap_page *iop, struct iomap_writepage_ctx *wpc, 1255598ecfbaSChristoph Hellwig struct writeback_control *wbc, struct list_head *iolist) 1256598ecfbaSChristoph Hellwig { 1257598ecfbaSChristoph Hellwig sector_t sector = iomap_sector(&wpc->iomap, offset); 1258598ecfbaSChristoph Hellwig unsigned len = i_blocksize(inode); 1259598ecfbaSChristoph Hellwig unsigned poff = offset & (PAGE_SIZE - 1); 1260598ecfbaSChristoph Hellwig 1261598ecfbaSChristoph Hellwig if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) { 1262598ecfbaSChristoph Hellwig if (wpc->ioend) 1263598ecfbaSChristoph Hellwig list_add(&wpc->ioend->io_list, iolist); 1264598ecfbaSChristoph Hellwig wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc); 1265598ecfbaSChristoph Hellwig } 1266598ecfbaSChristoph Hellwig 1267c1b79f11SChristoph Hellwig if (bio_add_page(wpc->ioend->io_bio, page, len, poff) != len) { 1268c1b79f11SChristoph Hellwig wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio); 1269c1b79f11SChristoph Hellwig __bio_add_page(wpc->ioend->io_bio, page, len, poff); 1270c1b79f11SChristoph Hellwig } 1271c1b79f11SChristoph Hellwig 12720fb2d720SMatthew Wilcox (Oracle) if (iop) 12730fb2d720SMatthew Wilcox (Oracle) atomic_add(len, &iop->write_bytes_pending); 1274598ecfbaSChristoph Hellwig wpc->ioend->io_size += len; 1275598ecfbaSChristoph Hellwig wbc_account_cgroup_owner(wbc, page, len); 1276598ecfbaSChristoph Hellwig } 1277598ecfbaSChristoph Hellwig 1278598ecfbaSChristoph Hellwig /* 1279598ecfbaSChristoph Hellwig * We implement an immediate ioend submission policy here to avoid needing to 1280598ecfbaSChristoph Hellwig * chain multiple ioends and hence nest mempool allocations which can violate 1281f1f264b4SAndreas Gruenbacher * the forward progress guarantees we need to provide. The current ioend we're 1282f1f264b4SAndreas Gruenbacher * adding blocks to is cached in the writepage context, and if the new block 1283f1f264b4SAndreas Gruenbacher * doesn't append to the cached ioend, it will create a new ioend and cache that 1284598ecfbaSChristoph Hellwig * instead. 1285598ecfbaSChristoph Hellwig * 1286598ecfbaSChristoph Hellwig * If a new ioend is created and cached, the old ioend is returned and queued 1287598ecfbaSChristoph Hellwig * locally for submission once the entire page is processed or an error has been 1288598ecfbaSChristoph Hellwig * detected. While ioends are submitted immediately after they are completed, 1289598ecfbaSChristoph Hellwig * batching optimisations are provided by higher level block plugging. 1290598ecfbaSChristoph Hellwig * 1291598ecfbaSChristoph Hellwig * At the end of a writeback pass, there will be a cached ioend remaining on the 1292598ecfbaSChristoph Hellwig * writepage context that the caller will need to submit. 
1293598ecfbaSChristoph Hellwig */ 1294598ecfbaSChristoph Hellwig static int 1295598ecfbaSChristoph Hellwig iomap_writepage_map(struct iomap_writepage_ctx *wpc, 1296598ecfbaSChristoph Hellwig struct writeback_control *wbc, struct inode *inode, 1297598ecfbaSChristoph Hellwig struct page *page, u64 end_offset) 1298598ecfbaSChristoph Hellwig { 12998e1bcef8SAndreas Gruenbacher struct iomap_page *iop = iomap_page_create(inode, page); 1300598ecfbaSChristoph Hellwig struct iomap_ioend *ioend, *next; 1301598ecfbaSChristoph Hellwig unsigned len = i_blocksize(inode); 1302598ecfbaSChristoph Hellwig u64 file_offset; /* file offset of page */ 1303598ecfbaSChristoph Hellwig int error = 0, count = 0, i; 1304598ecfbaSChristoph Hellwig LIST_HEAD(submit_list); 1305598ecfbaSChristoph Hellwig 13060fb2d720SMatthew Wilcox (Oracle) WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0); 1307598ecfbaSChristoph Hellwig 1308598ecfbaSChristoph Hellwig /* 1309598ecfbaSChristoph Hellwig * Walk through the page to find areas to write back. If we run off the 1310598ecfbaSChristoph Hellwig * end of the current map or find the current map invalid, grab a new 1311598ecfbaSChristoph Hellwig * one. 1312598ecfbaSChristoph Hellwig */ 1313598ecfbaSChristoph Hellwig for (i = 0, file_offset = page_offset(page); 1314598ecfbaSChristoph Hellwig i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset; 1315598ecfbaSChristoph Hellwig i++, file_offset += len) { 1316598ecfbaSChristoph Hellwig if (iop && !test_bit(i, iop->uptodate)) 1317598ecfbaSChristoph Hellwig continue; 1318598ecfbaSChristoph Hellwig 1319598ecfbaSChristoph Hellwig error = wpc->ops->map_blocks(wpc, inode, file_offset); 1320598ecfbaSChristoph Hellwig if (error) 1321598ecfbaSChristoph Hellwig break; 13223e19e6f3SChristoph Hellwig if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE)) 13233e19e6f3SChristoph Hellwig continue; 1324598ecfbaSChristoph Hellwig if (wpc->iomap.type == IOMAP_HOLE) 1325598ecfbaSChristoph Hellwig continue; 1326598ecfbaSChristoph Hellwig iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc, 1327598ecfbaSChristoph Hellwig &submit_list); 1328598ecfbaSChristoph Hellwig count++; 1329598ecfbaSChristoph Hellwig } 1330598ecfbaSChristoph Hellwig 1331598ecfbaSChristoph Hellwig WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list)); 1332598ecfbaSChristoph Hellwig WARN_ON_ONCE(!PageLocked(page)); 1333598ecfbaSChristoph Hellwig WARN_ON_ONCE(PageWriteback(page)); 133450e7d6c7SBrian Foster WARN_ON_ONCE(PageDirty(page)); 1335598ecfbaSChristoph Hellwig 1336598ecfbaSChristoph Hellwig /* 1337598ecfbaSChristoph Hellwig * We cannot cancel the ioend directly here on error. We may have 1338598ecfbaSChristoph Hellwig * already set other pages under writeback and hence we have to run I/O 1339598ecfbaSChristoph Hellwig * completion to mark the error state of the pages under writeback 1340598ecfbaSChristoph Hellwig * appropriately. 1341598ecfbaSChristoph Hellwig */ 1342598ecfbaSChristoph Hellwig if (unlikely(error)) { 1343598ecfbaSChristoph Hellwig /* 1344763e4cdcSBrian Foster * Let the filesystem know what portion of the current page 1345f1f264b4SAndreas Gruenbacher * failed to map. If the page hasn't been added to ioend, it 1346763e4cdcSBrian Foster * won't be affected by I/O completion and we must unlock it 1347763e4cdcSBrian Foster * now. 
1348598ecfbaSChristoph Hellwig */ 1349598ecfbaSChristoph Hellwig if (wpc->ops->discard_page) 1350763e4cdcSBrian Foster wpc->ops->discard_page(page, file_offset); 1351763e4cdcSBrian Foster if (!count) { 1352598ecfbaSChristoph Hellwig ClearPageUptodate(page); 1353598ecfbaSChristoph Hellwig unlock_page(page); 1354598ecfbaSChristoph Hellwig goto done; 1355598ecfbaSChristoph Hellwig } 1356598ecfbaSChristoph Hellwig } 1357598ecfbaSChristoph Hellwig 135850e7d6c7SBrian Foster set_page_writeback(page); 1359598ecfbaSChristoph Hellwig unlock_page(page); 1360598ecfbaSChristoph Hellwig 1361598ecfbaSChristoph Hellwig /* 1362f1f264b4SAndreas Gruenbacher * Preserve the original error if there was one; catch 1363598ecfbaSChristoph Hellwig * submission errors here and propagate into subsequent ioend 1364598ecfbaSChristoph Hellwig * submissions. 1365598ecfbaSChristoph Hellwig */ 1366598ecfbaSChristoph Hellwig list_for_each_entry_safe(ioend, next, &submit_list, io_list) { 1367598ecfbaSChristoph Hellwig int error2; 1368598ecfbaSChristoph Hellwig 1369598ecfbaSChristoph Hellwig list_del_init(&ioend->io_list); 1370598ecfbaSChristoph Hellwig error2 = iomap_submit_ioend(wpc, ioend, error); 1371598ecfbaSChristoph Hellwig if (error2 && !error) 1372598ecfbaSChristoph Hellwig error = error2; 1373598ecfbaSChristoph Hellwig } 1374598ecfbaSChristoph Hellwig 1375598ecfbaSChristoph Hellwig /* 1376598ecfbaSChristoph Hellwig * We can end up here with no error and nothing to write only if we race 1377598ecfbaSChristoph Hellwig * with a partial page truncate on a sub-page block sized filesystem. 1378598ecfbaSChristoph Hellwig */ 1379598ecfbaSChristoph Hellwig if (!count) 1380598ecfbaSChristoph Hellwig end_page_writeback(page); 1381598ecfbaSChristoph Hellwig done: 1382598ecfbaSChristoph Hellwig mapping_set_error(page->mapping, error); 1383598ecfbaSChristoph Hellwig return error; 1384598ecfbaSChristoph Hellwig } 1385598ecfbaSChristoph Hellwig 1386598ecfbaSChristoph Hellwig /* 1387598ecfbaSChristoph Hellwig * Write out a dirty page. 1388598ecfbaSChristoph Hellwig * 1389f1f264b4SAndreas Gruenbacher * For delalloc space on the page, we need to allocate space and flush it. 1390f1f264b4SAndreas Gruenbacher * For unwritten space on the page, we need to start the conversion to 1391598ecfbaSChristoph Hellwig * regular allocated space. 1392598ecfbaSChristoph Hellwig */ 1393598ecfbaSChristoph Hellwig static int 1394598ecfbaSChristoph Hellwig iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data) 1395598ecfbaSChristoph Hellwig { 1396598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc = data; 1397598ecfbaSChristoph Hellwig struct inode *inode = page->mapping->host; 1398598ecfbaSChristoph Hellwig pgoff_t end_index; 1399598ecfbaSChristoph Hellwig u64 end_offset; 1400598ecfbaSChristoph Hellwig loff_t offset; 1401598ecfbaSChristoph Hellwig 14021ac99452SMatthew Wilcox (Oracle) trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE); 1403598ecfbaSChristoph Hellwig 1404598ecfbaSChristoph Hellwig /* 1405f1f264b4SAndreas Gruenbacher * Refuse to write the page out if we're called from reclaim context. 1406598ecfbaSChristoph Hellwig * 1407598ecfbaSChristoph Hellwig * This avoids stack overflows when called from deeply used stacks in 1408598ecfbaSChristoph Hellwig * random callers for direct reclaim or memcg reclaim. We explicitly 1409598ecfbaSChristoph Hellwig * allow reclaim from kswapd as the stack usage there is relatively low. 
1410598ecfbaSChristoph Hellwig * 1411598ecfbaSChristoph Hellwig * This should never happen except in the case of a VM regression so 1412598ecfbaSChristoph Hellwig * warn about it. 1413598ecfbaSChristoph Hellwig */ 1414598ecfbaSChristoph Hellwig if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == 1415598ecfbaSChristoph Hellwig PF_MEMALLOC)) 1416598ecfbaSChristoph Hellwig goto redirty; 1417598ecfbaSChristoph Hellwig 1418598ecfbaSChristoph Hellwig /* 1419598ecfbaSChristoph Hellwig * Is this page beyond the end of the file? 1420598ecfbaSChristoph Hellwig * 1421598ecfbaSChristoph Hellwig * The page index is less than the end_index, adjust the end_offset 1422598ecfbaSChristoph Hellwig * to the highest offset that this page should represent. 1423598ecfbaSChristoph Hellwig * ----------------------------------------------------- 1424598ecfbaSChristoph Hellwig * | file mapping | <EOF> | 1425598ecfbaSChristoph Hellwig * ----------------------------------------------------- 1426598ecfbaSChristoph Hellwig * | Page ... | Page N-2 | Page N-1 | Page N | | 1427598ecfbaSChristoph Hellwig * ^--------------------------------^----------|-------- 1428598ecfbaSChristoph Hellwig * | desired writeback range | see else | 1429598ecfbaSChristoph Hellwig * ---------------------------------^------------------| 1430598ecfbaSChristoph Hellwig */ 1431598ecfbaSChristoph Hellwig offset = i_size_read(inode); 1432598ecfbaSChristoph Hellwig end_index = offset >> PAGE_SHIFT; 1433598ecfbaSChristoph Hellwig if (page->index < end_index) 1434598ecfbaSChristoph Hellwig end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT; 1435598ecfbaSChristoph Hellwig else { 1436598ecfbaSChristoph Hellwig /* 1437598ecfbaSChristoph Hellwig * Check whether the page to write out is beyond or straddles 1438598ecfbaSChristoph Hellwig * i_size or not. 1439598ecfbaSChristoph Hellwig * ------------------------------------------------------- 1440598ecfbaSChristoph Hellwig * | file mapping | <EOF> | 1441598ecfbaSChristoph Hellwig * ------------------------------------------------------- 1442598ecfbaSChristoph Hellwig * | Page ... | Page N-2 | Page N-1 | Page N | Beyond | 1443598ecfbaSChristoph Hellwig * ^--------------------------------^-----------|--------- 1444598ecfbaSChristoph Hellwig * | | Straddles | 1445598ecfbaSChristoph Hellwig * ---------------------------------^-----------|--------| 1446598ecfbaSChristoph Hellwig */ 1447598ecfbaSChristoph Hellwig unsigned offset_into_page = offset & (PAGE_SIZE - 1); 1448598ecfbaSChristoph Hellwig 1449598ecfbaSChristoph Hellwig /* 1450f1f264b4SAndreas Gruenbacher * Skip the page if it's fully outside i_size, e.g. due to a 1451f1f264b4SAndreas Gruenbacher * truncate operation that's in progress. We must redirty the 1452598ecfbaSChristoph Hellwig * page so that reclaim stops reclaiming it. Otherwise 1453598ecfbaSChristoph Hellwig * iomap_vm_releasepage() is called on it and gets confused. 1454598ecfbaSChristoph Hellwig * 1455f1f264b4SAndreas Gruenbacher * Note that the end_index is unsigned long. If the given 1456f1f264b4SAndreas Gruenbacher * offset is greater than 16TB on a 32-bit system then if we 1457f1f264b4SAndreas Gruenbacher * checked if the page is fully outside i_size with 1458f1f264b4SAndreas Gruenbacher * "if (page->index >= end_index + 1)", "end_index + 1" would 1459f1f264b4SAndreas Gruenbacher * overflow and evaluate to 0. 
Hence this page would be 1460f1f264b4SAndreas Gruenbacher * redirtied and written out repeatedly, which would result in 1461f1f264b4SAndreas Gruenbacher * an infinite loop; the user program performing this operation 1462f1f264b4SAndreas Gruenbacher * would hang. Instead, we can detect this situation by 1463f1f264b4SAndreas Gruenbacher * checking if the page is totally beyond i_size or if its 1464598ecfbaSChristoph Hellwig * offset is just equal to the EOF. 1465598ecfbaSChristoph Hellwig */ 1466598ecfbaSChristoph Hellwig if (page->index > end_index || 1467598ecfbaSChristoph Hellwig (page->index == end_index && offset_into_page == 0)) 1468598ecfbaSChristoph Hellwig goto redirty; 1469598ecfbaSChristoph Hellwig 1470598ecfbaSChristoph Hellwig /* 1471598ecfbaSChristoph Hellwig * The page straddles i_size. It must be zeroed out on each 1472598ecfbaSChristoph Hellwig * and every writepage invocation because it may be mmapped. 1473598ecfbaSChristoph Hellwig * "A file is mapped in multiples of the page size. For a file 1474598ecfbaSChristoph Hellwig * that is not a multiple of the page size, the remaining 1475598ecfbaSChristoph Hellwig * memory is zeroed when mapped, and writes to that region are 1476598ecfbaSChristoph Hellwig * not written out to the file." 1477598ecfbaSChristoph Hellwig */ 1478598ecfbaSChristoph Hellwig zero_user_segment(page, offset_into_page, PAGE_SIZE); 1479598ecfbaSChristoph Hellwig 1480598ecfbaSChristoph Hellwig /* Adjust the end_offset to the end of file */ 1481598ecfbaSChristoph Hellwig end_offset = offset; 1482598ecfbaSChristoph Hellwig } 1483598ecfbaSChristoph Hellwig 1484598ecfbaSChristoph Hellwig return iomap_writepage_map(wpc, wbc, inode, page, end_offset); 1485598ecfbaSChristoph Hellwig 1486598ecfbaSChristoph Hellwig redirty: 1487598ecfbaSChristoph Hellwig redirty_page_for_writepage(wbc, page); 1488598ecfbaSChristoph Hellwig unlock_page(page); 1489598ecfbaSChristoph Hellwig return 0; 1490598ecfbaSChristoph Hellwig } 1491598ecfbaSChristoph Hellwig 1492598ecfbaSChristoph Hellwig int 1493598ecfbaSChristoph Hellwig iomap_writepage(struct page *page, struct writeback_control *wbc, 1494598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc, 1495598ecfbaSChristoph Hellwig const struct iomap_writeback_ops *ops) 1496598ecfbaSChristoph Hellwig { 1497598ecfbaSChristoph Hellwig int ret; 1498598ecfbaSChristoph Hellwig 1499598ecfbaSChristoph Hellwig wpc->ops = ops; 1500598ecfbaSChristoph Hellwig ret = iomap_do_writepage(page, wbc, wpc); 1501598ecfbaSChristoph Hellwig if (!wpc->ioend) 1502598ecfbaSChristoph Hellwig return ret; 1503598ecfbaSChristoph Hellwig return iomap_submit_ioend(wpc, wpc->ioend, ret); 1504598ecfbaSChristoph Hellwig } 1505598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_writepage); 1506598ecfbaSChristoph Hellwig 1507598ecfbaSChristoph Hellwig int 1508598ecfbaSChristoph Hellwig iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, 1509598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc, 1510598ecfbaSChristoph Hellwig const struct iomap_writeback_ops *ops) 1511598ecfbaSChristoph Hellwig { 1512598ecfbaSChristoph Hellwig int ret; 1513598ecfbaSChristoph Hellwig 1514598ecfbaSChristoph Hellwig wpc->ops = ops; 1515598ecfbaSChristoph Hellwig ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc); 1516598ecfbaSChristoph Hellwig if (!wpc->ioend) 1517598ecfbaSChristoph Hellwig return ret; 1518598ecfbaSChristoph Hellwig return iomap_submit_ioend(wpc, wpc->ioend, ret); 1519598ecfbaSChristoph Hellwig } 1520598ecfbaSChristoph Hellwig 
EXPORT_SYMBOL_GPL(iomap_writepages); 1521598ecfbaSChristoph Hellwig 1522598ecfbaSChristoph Hellwig static int __init iomap_init(void) 1523598ecfbaSChristoph Hellwig { 1524598ecfbaSChristoph Hellwig return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE), 1525598ecfbaSChristoph Hellwig offsetof(struct iomap_ioend, io_inline_bio), 1526598ecfbaSChristoph Hellwig BIOSET_NEED_BVECS); 1527598ecfbaSChristoph Hellwig } 1528598ecfbaSChristoph Hellwig fs_initcall(iomap_init); 1529
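/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the original file.  It shows, under stated assumptions, one way a
 * filesystem might wire the writeback entry points exported above
 * (iomap_writepages() and the iomap_writeback_ops callbacks) into its
 * address_space operations.  All "myfs_" names are hypothetical; only the
 * interfaces referenced in this file (->map_blocks(), the optional
 * ->prepare_ioend()/->discard_page() hooks, and iomap_writepages()) are
 * assumed.
 */
#if 0	/* illustrative sketch, not compiled */
static int myfs_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset)
{
	/*
	 * Fill wpc->iomap with a mapping that covers @offset.  A toy
	 * filesystem that stores file data contiguously from the start of
	 * the block device could map the whole file in one go; a real
	 * filesystem would look up (and possibly allocate) the extent
	 * containing @offset here and revalidate any cached mapping.
	 */
	wpc->iomap.bdev = inode->i_sb->s_bdev;
	wpc->iomap.offset = 0;
	wpc->iomap.length = i_size_read(inode);
	wpc->iomap.addr = 0;
	wpc->iomap.type = IOMAP_MAPPED;
	return 0;
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
	.map_blocks	= myfs_map_blocks,
	/* ->prepare_ioend() and ->discard_page() are optional */
};

static int myfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	/* walk the dirty pages, building and submitting ioends */
	return iomap_writepages(mapping, wbc, &wpc, &myfs_writeback_ops);
}
#endif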
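/*
 * Editor's note: a second illustrative sketch, also not part of the
 * original file, showing how iomap_page_mkwrite() above is typically
 * consumed: the filesystem points its vm_operations_struct ->page_mkwrite
 * handler at a thin wrapper that supplies its iomap_ops.  "myfs_iomap_ops"
 * and the locking mentioned in the comments are assumptions, not
 * requirements imposed by this file.
 */
#if 0	/* illustrative sketch, not compiled */
extern const struct iomap_ops myfs_iomap_ops;	/* hypothetical */

static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	/*
	 * iomap_page_mkwrite() checks the page against truncate, maps the
	 * blocks under it via myfs_iomap_ops, dirties it, and returns with
	 * the page locked (VM_FAULT_LOCKED) on success.  A real filesystem
	 * would usually bracket this call with sb_start_pagefault()/
	 * sb_end_pagefault() and whatever locks its iomap_ops require.
	 */
	return iomap_page_mkwrite(vmf, &myfs_iomap_ops);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= myfs_page_mkwrite,
};
#endif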