1afc51aaaSDarrick J. Wong // SPDX-License-Identifier: GPL-2.0 2afc51aaaSDarrick J. Wong /* 3afc51aaaSDarrick J. Wong * Copyright (C) 2010 Red Hat, Inc. 4598ecfbaSChristoph Hellwig * Copyright (C) 2016-2019 Christoph Hellwig. 5afc51aaaSDarrick J. Wong */ 6afc51aaaSDarrick J. Wong #include <linux/module.h> 7afc51aaaSDarrick J. Wong #include <linux/compiler.h> 8afc51aaaSDarrick J. Wong #include <linux/fs.h> 9afc51aaaSDarrick J. Wong #include <linux/iomap.h> 10afc51aaaSDarrick J. Wong #include <linux/pagemap.h> 11afc51aaaSDarrick J. Wong #include <linux/uio.h> 12afc51aaaSDarrick J. Wong #include <linux/buffer_head.h> 13afc51aaaSDarrick J. Wong #include <linux/dax.h> 14afc51aaaSDarrick J. Wong #include <linux/writeback.h> 15598ecfbaSChristoph Hellwig #include <linux/list_sort.h> 16afc51aaaSDarrick J. Wong #include <linux/swap.h> 17afc51aaaSDarrick J. Wong #include <linux/bio.h> 18afc51aaaSDarrick J. Wong #include <linux/sched/signal.h> 19afc51aaaSDarrick J. Wong #include <linux/migrate.h> 209e91c572SChristoph Hellwig #include "trace.h" 21afc51aaaSDarrick J. Wong 22afc51aaaSDarrick J. Wong #include "../internal.h" 23afc51aaaSDarrick J. Wong 24ab08b01eSChristoph Hellwig /* 25ab08b01eSChristoph Hellwig * Structure allocated for each page when block size < PAGE_SIZE to track 26ab08b01eSChristoph Hellwig * sub-page uptodate status and I/O completions. 27ab08b01eSChristoph Hellwig */ 28ab08b01eSChristoph Hellwig struct iomap_page { 29ab08b01eSChristoph Hellwig atomic_t read_count; 30ab08b01eSChristoph Hellwig atomic_t write_count; 31ab08b01eSChristoph Hellwig DECLARE_BITMAP(uptodate, PAGE_SIZE / 512); 32ab08b01eSChristoph Hellwig }; 33ab08b01eSChristoph Hellwig 34ab08b01eSChristoph Hellwig static inline struct iomap_page *to_iomap_page(struct page *page) 35ab08b01eSChristoph Hellwig { 36ab08b01eSChristoph Hellwig if (page_has_private(page)) 37ab08b01eSChristoph Hellwig return (struct iomap_page *)page_private(page); 38ab08b01eSChristoph Hellwig return NULL; 39ab08b01eSChristoph Hellwig } 40ab08b01eSChristoph Hellwig 41598ecfbaSChristoph Hellwig static struct bio_set iomap_ioend_bioset; 42598ecfbaSChristoph Hellwig 43afc51aaaSDarrick J. Wong static struct iomap_page * 44afc51aaaSDarrick J. Wong iomap_page_create(struct inode *inode, struct page *page) 45afc51aaaSDarrick J. Wong { 46afc51aaaSDarrick J. Wong struct iomap_page *iop = to_iomap_page(page); 47afc51aaaSDarrick J. Wong 48afc51aaaSDarrick J. Wong if (iop || i_blocksize(inode) == PAGE_SIZE) 49afc51aaaSDarrick J. Wong return iop; 50afc51aaaSDarrick J. Wong 51afc51aaaSDarrick J. Wong iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL); 52afc51aaaSDarrick J. Wong atomic_set(&iop->read_count, 0); 53afc51aaaSDarrick J. Wong atomic_set(&iop->write_count, 0); 54afc51aaaSDarrick J. Wong bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); 55afc51aaaSDarrick J. Wong 56afc51aaaSDarrick J. Wong /* 57afc51aaaSDarrick J. Wong * migrate_page_move_mapping() assumes that pages with private data have 58afc51aaaSDarrick J. Wong * their count elevated by 1. 59afc51aaaSDarrick J. Wong */ 60afc51aaaSDarrick J. Wong get_page(page); 61afc51aaaSDarrick J. Wong set_page_private(page, (unsigned long)iop); 62afc51aaaSDarrick J. Wong SetPagePrivate(page); 63afc51aaaSDarrick J. Wong return iop; 64afc51aaaSDarrick J. Wong } 65afc51aaaSDarrick J. Wong 66afc51aaaSDarrick J. Wong static void 67afc51aaaSDarrick J. Wong iomap_page_release(struct page *page) 68afc51aaaSDarrick J. Wong { 69afc51aaaSDarrick J. Wong struct iomap_page *iop = to_iomap_page(page); 70afc51aaaSDarrick J. Wong 71afc51aaaSDarrick J. Wong if (!iop) 72afc51aaaSDarrick J. Wong return; 73afc51aaaSDarrick J. Wong WARN_ON_ONCE(atomic_read(&iop->read_count)); 74afc51aaaSDarrick J. Wong WARN_ON_ONCE(atomic_read(&iop->write_count)); 75afc51aaaSDarrick J. Wong ClearPagePrivate(page); 76afc51aaaSDarrick J. Wong set_page_private(page, 0); 77afc51aaaSDarrick J. Wong put_page(page); 78afc51aaaSDarrick J. Wong kfree(iop); 79afc51aaaSDarrick J. Wong } 80afc51aaaSDarrick J. Wong 81afc51aaaSDarrick J. Wong /* 82afc51aaaSDarrick J. Wong * Calculate the range inside the page that we actually need to read. 83afc51aaaSDarrick J. Wong */ 84afc51aaaSDarrick J. Wong static void 85afc51aaaSDarrick J. Wong iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop, 86afc51aaaSDarrick J. Wong loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp) 87afc51aaaSDarrick J. Wong { 88afc51aaaSDarrick J. Wong loff_t orig_pos = *pos; 89afc51aaaSDarrick J. Wong loff_t isize = i_size_read(inode); 90afc51aaaSDarrick J. Wong unsigned block_bits = inode->i_blkbits; 91afc51aaaSDarrick J. Wong unsigned block_size = (1 << block_bits); 92afc51aaaSDarrick J. Wong unsigned poff = offset_in_page(*pos); 93afc51aaaSDarrick J. Wong unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length); 94afc51aaaSDarrick J. Wong unsigned first = poff >> block_bits; 95afc51aaaSDarrick J. Wong unsigned last = (poff + plen - 1) >> block_bits; 96afc51aaaSDarrick J. Wong 97afc51aaaSDarrick J. Wong /* 98afc51aaaSDarrick J. Wong * If the block size is smaller than the page size we need to check the 99afc51aaaSDarrick J. Wong * per-block uptodate status and adjust the offset and length if needed 100afc51aaaSDarrick J. Wong * to avoid reading in already uptodate ranges. 101afc51aaaSDarrick J. Wong */ 102afc51aaaSDarrick J. Wong if (iop) { 103afc51aaaSDarrick J. Wong unsigned int i; 104afc51aaaSDarrick J. Wong 105afc51aaaSDarrick J. Wong /* move forward for each leading block marked uptodate */ 106afc51aaaSDarrick J. Wong for (i = first; i <= last; i++) { 107afc51aaaSDarrick J. Wong if (!test_bit(i, iop->uptodate)) 108afc51aaaSDarrick J. Wong break; 109afc51aaaSDarrick J. Wong *pos += block_size; 110afc51aaaSDarrick J. Wong poff += block_size; 111afc51aaaSDarrick J. Wong plen -= block_size; 112afc51aaaSDarrick J. Wong first++; 113afc51aaaSDarrick J. Wong } 114afc51aaaSDarrick J. Wong 115afc51aaaSDarrick J. Wong /* truncate len if we find any trailing uptodate block(s) */ 116afc51aaaSDarrick J. Wong for ( ; i <= last; i++) { 117afc51aaaSDarrick J. Wong if (test_bit(i, iop->uptodate)) { 118afc51aaaSDarrick J. Wong plen -= (last - i + 1) * block_size; 119afc51aaaSDarrick J. Wong last = i - 1; 120afc51aaaSDarrick J. Wong break; 121afc51aaaSDarrick J. Wong } 122afc51aaaSDarrick J. Wong } 123afc51aaaSDarrick J. Wong } 124afc51aaaSDarrick J. Wong 125afc51aaaSDarrick J. Wong /* 126afc51aaaSDarrick J. Wong * If the extent spans the block that contains the i_size we need to 127afc51aaaSDarrick J. Wong * handle both halves separately so that we properly zero data in the 128afc51aaaSDarrick J. Wong * page cache for blocks that are entirely outside of i_size. 129afc51aaaSDarrick J. Wong */ 130afc51aaaSDarrick J. Wong if (orig_pos <= isize && orig_pos + length > isize) { 131afc51aaaSDarrick J. Wong unsigned end = offset_in_page(isize - 1) >> block_bits; 132afc51aaaSDarrick J. Wong 133afc51aaaSDarrick J. Wong if (first <= end && last > end) 134afc51aaaSDarrick J. Wong plen -= (last - end) * block_size; 135afc51aaaSDarrick J. Wong } 136afc51aaaSDarrick J. Wong 137afc51aaaSDarrick J. Wong *offp = poff; 138afc51aaaSDarrick J. Wong *lenp = plen; 139afc51aaaSDarrick J. Wong } 140afc51aaaSDarrick J. Wong 141afc51aaaSDarrick J. Wong static void 142afc51aaaSDarrick J. Wong iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len) 143afc51aaaSDarrick J. Wong { 144afc51aaaSDarrick J. Wong struct iomap_page *iop = to_iomap_page(page); 145afc51aaaSDarrick J. Wong struct inode *inode = page->mapping->host; 146afc51aaaSDarrick J. Wong unsigned first = off >> inode->i_blkbits; 147afc51aaaSDarrick J. Wong unsigned last = (off + len - 1) >> inode->i_blkbits; 148afc51aaaSDarrick J. Wong unsigned int i; 149afc51aaaSDarrick J. Wong bool uptodate = true; 150afc51aaaSDarrick J. Wong 151afc51aaaSDarrick J. Wong if (iop) { 152afc51aaaSDarrick J. Wong for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) { 153afc51aaaSDarrick J. Wong if (i >= first && i <= last) 154afc51aaaSDarrick J. Wong set_bit(i, iop->uptodate); 155afc51aaaSDarrick J. Wong else if (!test_bit(i, iop->uptodate)) 156afc51aaaSDarrick J. Wong uptodate = false; 157afc51aaaSDarrick J. Wong } 158afc51aaaSDarrick J. Wong } 159afc51aaaSDarrick J. Wong 160afc51aaaSDarrick J. Wong if (uptodate && !PageError(page)) 161afc51aaaSDarrick J. Wong SetPageUptodate(page); 162afc51aaaSDarrick J. Wong } 163afc51aaaSDarrick J. Wong 164afc51aaaSDarrick J. Wong static void 165afc51aaaSDarrick J. Wong iomap_read_finish(struct iomap_page *iop, struct page *page) 166afc51aaaSDarrick J. Wong { 167afc51aaaSDarrick J. Wong if (!iop || atomic_dec_and_test(&iop->read_count)) 168afc51aaaSDarrick J. Wong unlock_page(page); 169afc51aaaSDarrick J. Wong } 170afc51aaaSDarrick J. Wong 171afc51aaaSDarrick J. Wong static void 172afc51aaaSDarrick J. Wong iomap_read_page_end_io(struct bio_vec *bvec, int error) 173afc51aaaSDarrick J. Wong { 174afc51aaaSDarrick J. Wong struct page *page = bvec->bv_page; 175afc51aaaSDarrick J. Wong struct iomap_page *iop = to_iomap_page(page); 176afc51aaaSDarrick J. Wong 177afc51aaaSDarrick J. Wong if (unlikely(error)) { 178afc51aaaSDarrick J. Wong ClearPageUptodate(page); 179afc51aaaSDarrick J. Wong SetPageError(page); 180afc51aaaSDarrick J. Wong } else { 181afc51aaaSDarrick J. Wong iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len); 182afc51aaaSDarrick J. Wong } 183afc51aaaSDarrick J. Wong 184afc51aaaSDarrick J. Wong iomap_read_finish(iop, page); 185afc51aaaSDarrick J. Wong } 186afc51aaaSDarrick J. Wong 187afc51aaaSDarrick J. Wong static void 188afc51aaaSDarrick J. Wong iomap_read_end_io(struct bio *bio) 189afc51aaaSDarrick J. Wong { 190afc51aaaSDarrick J. Wong int error = blk_status_to_errno(bio->bi_status); 191afc51aaaSDarrick J. Wong struct bio_vec *bvec; 192afc51aaaSDarrick J. Wong struct bvec_iter_all iter_all; 193afc51aaaSDarrick J. Wong 194afc51aaaSDarrick J. Wong bio_for_each_segment_all(bvec, bio, iter_all) 195afc51aaaSDarrick J. Wong iomap_read_page_end_io(bvec, error); 196afc51aaaSDarrick J. Wong bio_put(bio); 197afc51aaaSDarrick J. Wong } 198afc51aaaSDarrick J. Wong 199afc51aaaSDarrick J. Wong struct iomap_readpage_ctx { 200afc51aaaSDarrick J. Wong struct page *cur_page; 201afc51aaaSDarrick J. Wong bool cur_page_in_bio; 202afc51aaaSDarrick J. Wong bool is_readahead; 203afc51aaaSDarrick J. Wong struct bio *bio; 204afc51aaaSDarrick J. Wong struct list_head *pages; 205afc51aaaSDarrick J. Wong }; 206afc51aaaSDarrick J. Wong 207afc51aaaSDarrick J. Wong static void 208afc51aaaSDarrick J. Wong iomap_read_inline_data(struct inode *inode, struct page *page, 209afc51aaaSDarrick J. Wong struct iomap *iomap) 210afc51aaaSDarrick J. Wong { 211afc51aaaSDarrick J. Wong size_t size = i_size_read(inode); 212afc51aaaSDarrick J. Wong void *addr; 213afc51aaaSDarrick J. Wong 214afc51aaaSDarrick J. Wong if (PageUptodate(page)) 215afc51aaaSDarrick J. Wong return; 216afc51aaaSDarrick J. Wong 217afc51aaaSDarrick J. Wong BUG_ON(page->index); 218afc51aaaSDarrick J. Wong BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data)); 219afc51aaaSDarrick J. Wong 220afc51aaaSDarrick J. Wong addr = kmap_atomic(page); 221afc51aaaSDarrick J. Wong memcpy(addr, iomap->inline_data, size); 222afc51aaaSDarrick J. Wong memset(addr + size, 0, PAGE_SIZE - size); 223afc51aaaSDarrick J. Wong kunmap_atomic(addr); 224afc51aaaSDarrick J. Wong SetPageUptodate(page); 225afc51aaaSDarrick J. Wong } 226afc51aaaSDarrick J. Wong 227009d8d84SChristoph Hellwig static inline bool iomap_block_needs_zeroing(struct inode *inode, 228009d8d84SChristoph Hellwig struct iomap *iomap, loff_t pos) 229009d8d84SChristoph Hellwig { 230009d8d84SChristoph Hellwig return iomap->type != IOMAP_MAPPED || 231009d8d84SChristoph Hellwig (iomap->flags & IOMAP_F_NEW) || 232009d8d84SChristoph Hellwig pos >= i_size_read(inode); 233009d8d84SChristoph Hellwig } 234009d8d84SChristoph Hellwig 235afc51aaaSDarrick J. Wong static loff_t 236afc51aaaSDarrick J. Wong iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data, 237afc51aaaSDarrick J. Wong struct iomap *iomap) 238afc51aaaSDarrick J. Wong { 239afc51aaaSDarrick J. Wong struct iomap_readpage_ctx *ctx = data; 240afc51aaaSDarrick J. Wong struct page *page = ctx->cur_page; 241afc51aaaSDarrick J. Wong struct iomap_page *iop = iomap_page_create(inode, page); 242afc51aaaSDarrick J. Wong bool same_page = false, is_contig = false; 243afc51aaaSDarrick J. Wong loff_t orig_pos = pos; 244afc51aaaSDarrick J. Wong unsigned poff, plen; 245afc51aaaSDarrick J. Wong sector_t sector; 246afc51aaaSDarrick J. Wong 247afc51aaaSDarrick J. Wong if (iomap->type == IOMAP_INLINE) { 248afc51aaaSDarrick J. Wong WARN_ON_ONCE(pos); 249afc51aaaSDarrick J. Wong iomap_read_inline_data(inode, page, iomap); 250afc51aaaSDarrick J. Wong return PAGE_SIZE; 251afc51aaaSDarrick J. Wong } 252afc51aaaSDarrick J. Wong 253afc51aaaSDarrick J. Wong /* zero post-eof blocks as the page may be mapped */ 254afc51aaaSDarrick J. Wong iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen); 255afc51aaaSDarrick J. Wong if (plen == 0) 256afc51aaaSDarrick J. Wong goto done; 257afc51aaaSDarrick J. Wong 258009d8d84SChristoph Hellwig if (iomap_block_needs_zeroing(inode, iomap, pos)) { 259afc51aaaSDarrick J. Wong zero_user(page, poff, plen); 260afc51aaaSDarrick J. Wong iomap_set_range_uptodate(page, poff, plen); 261afc51aaaSDarrick J. Wong goto done; 262afc51aaaSDarrick J. Wong } 263afc51aaaSDarrick J. Wong 264afc51aaaSDarrick J. Wong ctx->cur_page_in_bio = true; 265afc51aaaSDarrick J. Wong 266afc51aaaSDarrick J. Wong /* 267afc51aaaSDarrick J. Wong * Try to merge into a previous segment if we can. 268afc51aaaSDarrick J. Wong */ 269afc51aaaSDarrick J. Wong sector = iomap_sector(iomap, pos); 270afc51aaaSDarrick J. Wong if (ctx->bio && bio_end_sector(ctx->bio) == sector) 271afc51aaaSDarrick J. Wong is_contig = true; 272afc51aaaSDarrick J. Wong 273afc51aaaSDarrick J. Wong if (is_contig && 274afc51aaaSDarrick J. Wong __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) { 275afc51aaaSDarrick J. Wong if (!same_page && iop) 276afc51aaaSDarrick J. Wong atomic_inc(&iop->read_count); 277afc51aaaSDarrick J. Wong goto done; 278afc51aaaSDarrick J. Wong } 279afc51aaaSDarrick J. Wong 280afc51aaaSDarrick J. Wong /* 281afc51aaaSDarrick J. Wong * If we start a new segment we need to increase the read count, and we 282afc51aaaSDarrick J. Wong * need to do so before submitting any previous full bio to make sure 283afc51aaaSDarrick J. Wong * that we don't prematurely unlock the page. 284afc51aaaSDarrick J. Wong */ 285afc51aaaSDarrick J. Wong if (iop) 286afc51aaaSDarrick J. Wong atomic_inc(&iop->read_count); 287afc51aaaSDarrick J. Wong 288afc51aaaSDarrick J. Wong if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) { 289afc51aaaSDarrick J. Wong gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL); 290afc51aaaSDarrick J. Wong int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT; 291afc51aaaSDarrick J. Wong 292afc51aaaSDarrick J. Wong if (ctx->bio) 293afc51aaaSDarrick J. Wong submit_bio(ctx->bio); 294afc51aaaSDarrick J. Wong 295afc51aaaSDarrick J. Wong if (ctx->is_readahead) /* same as readahead_gfp_mask */ 296afc51aaaSDarrick J. Wong gfp |= __GFP_NORETRY | __GFP_NOWARN; 297afc51aaaSDarrick J. Wong ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs)); 298afc51aaaSDarrick J. Wong ctx->bio->bi_opf = REQ_OP_READ; 299afc51aaaSDarrick J. Wong if (ctx->is_readahead) 300afc51aaaSDarrick J. Wong ctx->bio->bi_opf |= REQ_RAHEAD; 301afc51aaaSDarrick J. Wong ctx->bio->bi_iter.bi_sector = sector; 302afc51aaaSDarrick J. Wong bio_set_dev(ctx->bio, iomap->bdev); 303afc51aaaSDarrick J. Wong ctx->bio->bi_end_io = iomap_read_end_io; 304afc51aaaSDarrick J. Wong } 305afc51aaaSDarrick J. Wong 306afc51aaaSDarrick J. Wong bio_add_page(ctx->bio, page, plen, poff); 307afc51aaaSDarrick J. Wong done: 308afc51aaaSDarrick J. Wong /* 309afc51aaaSDarrick J. Wong * Move the caller beyond our range so that it keeps making progress. 310afc51aaaSDarrick J. Wong * For that we have to include any leading non-uptodate ranges, but 311afc51aaaSDarrick J. Wong * we can skip trailing ones as they will be handled in the next 312afc51aaaSDarrick J. Wong * iteration. 313afc51aaaSDarrick J. Wong */ 314afc51aaaSDarrick J. Wong return pos - orig_pos + plen; 315afc51aaaSDarrick J. Wong } 316afc51aaaSDarrick J. Wong 317afc51aaaSDarrick J. Wong int 318afc51aaaSDarrick J. Wong iomap_readpage(struct page *page, const struct iomap_ops *ops) 319afc51aaaSDarrick J. Wong { 320afc51aaaSDarrick J. Wong struct iomap_readpage_ctx ctx = { .cur_page = page }; 321afc51aaaSDarrick J. Wong struct inode *inode = page->mapping->host; 322afc51aaaSDarrick J. Wong unsigned poff; 323afc51aaaSDarrick J. Wong loff_t ret; 324afc51aaaSDarrick J. Wong 3259e91c572SChristoph Hellwig trace_iomap_readpage(page->mapping->host, 1); 3269e91c572SChristoph Hellwig 327afc51aaaSDarrick J. Wong for (poff = 0; poff < PAGE_SIZE; poff += ret) { 328afc51aaaSDarrick J. Wong ret = iomap_apply(inode, page_offset(page) + poff, 329afc51aaaSDarrick J. Wong PAGE_SIZE - poff, 0, ops, &ctx, 330afc51aaaSDarrick J. Wong iomap_readpage_actor); 331afc51aaaSDarrick J. Wong if (ret <= 0) { 332afc51aaaSDarrick J. Wong WARN_ON_ONCE(ret == 0); 333afc51aaaSDarrick J. Wong SetPageError(page); 334afc51aaaSDarrick J. Wong break; 335afc51aaaSDarrick J. Wong } 336afc51aaaSDarrick J. Wong } 337afc51aaaSDarrick J. Wong 338afc51aaaSDarrick J. Wong if (ctx.bio) { 339afc51aaaSDarrick J. Wong submit_bio(ctx.bio); 340afc51aaaSDarrick J. Wong WARN_ON_ONCE(!ctx.cur_page_in_bio); 341afc51aaaSDarrick J. Wong } else { 342afc51aaaSDarrick J. Wong WARN_ON_ONCE(ctx.cur_page_in_bio); 343afc51aaaSDarrick J. Wong unlock_page(page); 344afc51aaaSDarrick J. Wong } 345afc51aaaSDarrick J. Wong 346afc51aaaSDarrick J. Wong /* 347afc51aaaSDarrick J. Wong * Just like mpage_readpages and block_read_full_page we always 348afc51aaaSDarrick J. Wong * return 0 and just mark the page as PageError on errors. This 349afc51aaaSDarrick J. Wong * should be cleaned up all through the stack eventually. 350afc51aaaSDarrick J. Wong */ 351afc51aaaSDarrick J. Wong return 0; 352afc51aaaSDarrick J. Wong } 353afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_readpage); 354afc51aaaSDarrick J. Wong 355afc51aaaSDarrick J. Wong static struct page * 356afc51aaaSDarrick J. Wong iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos, 357afc51aaaSDarrick J. Wong loff_t length, loff_t *done) 358afc51aaaSDarrick J. Wong { 359afc51aaaSDarrick J. Wong while (!list_empty(pages)) { 360afc51aaaSDarrick J. Wong struct page *page = lru_to_page(pages); 361afc51aaaSDarrick J. Wong 362afc51aaaSDarrick J. Wong if (page_offset(page) >= (u64)pos + length) 363afc51aaaSDarrick J. Wong break; 364afc51aaaSDarrick J. Wong 365afc51aaaSDarrick J. Wong list_del(&page->lru); 366afc51aaaSDarrick J. Wong if (!add_to_page_cache_lru(page, inode->i_mapping, page->index, 367afc51aaaSDarrick J. Wong GFP_NOFS)) 368afc51aaaSDarrick J. Wong return page; 369afc51aaaSDarrick J. Wong 370afc51aaaSDarrick J. Wong /* 371afc51aaaSDarrick J. Wong * If we already have a page in the page cache at index we are 372afc51aaaSDarrick J. Wong * done. Upper layers don't care if it is uptodate after the 373afc51aaaSDarrick J. Wong * readpages call itself as every page gets checked again once 374afc51aaaSDarrick J. Wong * actually needed. 375afc51aaaSDarrick J. Wong */ 376afc51aaaSDarrick J. Wong *done += PAGE_SIZE; 377afc51aaaSDarrick J. Wong put_page(page); 378afc51aaaSDarrick J. Wong } 379afc51aaaSDarrick J. Wong 380afc51aaaSDarrick J. Wong return NULL; 381afc51aaaSDarrick J. Wong } 382afc51aaaSDarrick J. Wong 383afc51aaaSDarrick J. Wong static loff_t 384afc51aaaSDarrick J. Wong iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length, 385afc51aaaSDarrick J. Wong void *data, struct iomap *iomap) 386afc51aaaSDarrick J. Wong { 387afc51aaaSDarrick J. Wong struct iomap_readpage_ctx *ctx = data; 388afc51aaaSDarrick J. Wong loff_t done, ret; 389afc51aaaSDarrick J. Wong 390afc51aaaSDarrick J. Wong for (done = 0; done < length; done += ret) { 391afc51aaaSDarrick J. Wong if (ctx->cur_page && offset_in_page(pos + done) == 0) { 392afc51aaaSDarrick J. Wong if (!ctx->cur_page_in_bio) 393afc51aaaSDarrick J. Wong unlock_page(ctx->cur_page); 394afc51aaaSDarrick J. Wong put_page(ctx->cur_page); 395afc51aaaSDarrick J. Wong ctx->cur_page = NULL; 396afc51aaaSDarrick J. Wong } 397afc51aaaSDarrick J. Wong if (!ctx->cur_page) { 398afc51aaaSDarrick J. Wong ctx->cur_page = iomap_next_page(inode, ctx->pages, 399afc51aaaSDarrick J. Wong pos, length, &done); 400afc51aaaSDarrick J. Wong if (!ctx->cur_page) 401afc51aaaSDarrick J. Wong break; 402afc51aaaSDarrick J. Wong ctx->cur_page_in_bio = false; 403afc51aaaSDarrick J. Wong } 404afc51aaaSDarrick J. Wong ret = iomap_readpage_actor(inode, pos + done, length - done, 405afc51aaaSDarrick J. Wong ctx, iomap); 406afc51aaaSDarrick J. Wong } 407afc51aaaSDarrick J. Wong 408afc51aaaSDarrick J. Wong return done; 409afc51aaaSDarrick J. Wong } 410afc51aaaSDarrick J. Wong 411afc51aaaSDarrick J. Wong int 412afc51aaaSDarrick J. Wong iomap_readpages(struct address_space *mapping, struct list_head *pages, 413afc51aaaSDarrick J. Wong unsigned nr_pages, const struct iomap_ops *ops) 414afc51aaaSDarrick J. Wong { 415afc51aaaSDarrick J. Wong struct iomap_readpage_ctx ctx = { 416afc51aaaSDarrick J. Wong .pages = pages, 417afc51aaaSDarrick J. Wong .is_readahead = true, 418afc51aaaSDarrick J. Wong }; 419afc51aaaSDarrick J. Wong loff_t pos = page_offset(list_entry(pages->prev, struct page, lru)); 420afc51aaaSDarrick J. Wong loff_t last = page_offset(list_entry(pages->next, struct page, lru)); 421afc51aaaSDarrick J. Wong loff_t length = last - pos + PAGE_SIZE, ret = 0; 422afc51aaaSDarrick J. Wong 4239e91c572SChristoph Hellwig trace_iomap_readpages(mapping->host, nr_pages); 4249e91c572SChristoph Hellwig 425afc51aaaSDarrick J. Wong while (length > 0) { 426afc51aaaSDarrick J. Wong ret = iomap_apply(mapping->host, pos, length, 0, ops, 427afc51aaaSDarrick J. Wong &ctx, iomap_readpages_actor); 428afc51aaaSDarrick J. Wong if (ret <= 0) { 429afc51aaaSDarrick J. Wong WARN_ON_ONCE(ret == 0); 430afc51aaaSDarrick J. Wong goto done; 431afc51aaaSDarrick J. Wong } 432afc51aaaSDarrick J. Wong pos += ret; 433afc51aaaSDarrick J. Wong length -= ret; 434afc51aaaSDarrick J. Wong } 435afc51aaaSDarrick J. Wong ret = 0; 436afc51aaaSDarrick J. Wong done: 437afc51aaaSDarrick J. Wong if (ctx.bio) 438afc51aaaSDarrick J. Wong submit_bio(ctx.bio); 439afc51aaaSDarrick J. Wong if (ctx.cur_page) { 440afc51aaaSDarrick J. Wong if (!ctx.cur_page_in_bio) 441afc51aaaSDarrick J. Wong unlock_page(ctx.cur_page); 442afc51aaaSDarrick J. Wong put_page(ctx.cur_page); 443afc51aaaSDarrick J. Wong } 444afc51aaaSDarrick J. Wong 445afc51aaaSDarrick J. Wong /* 446afc51aaaSDarrick J. Wong * Check that we didn't lose a page due to the arcance calling 447afc51aaaSDarrick J. Wong * conventions.. 448afc51aaaSDarrick J. Wong */ 449afc51aaaSDarrick J. Wong WARN_ON_ONCE(!ret && !list_empty(ctx.pages)); 450afc51aaaSDarrick J. Wong return ret; 451afc51aaaSDarrick J. Wong } 452afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_readpages); 453afc51aaaSDarrick J. Wong 454afc51aaaSDarrick J. Wong /* 455afc51aaaSDarrick J. Wong * iomap_is_partially_uptodate checks whether blocks within a page are 456afc51aaaSDarrick J. Wong * uptodate or not. 457afc51aaaSDarrick J. Wong * 458afc51aaaSDarrick J. Wong * Returns true if all blocks which correspond to a file portion 459afc51aaaSDarrick J. Wong * we want to read within the page are uptodate. 460afc51aaaSDarrick J. Wong */ 461afc51aaaSDarrick J. Wong int 462afc51aaaSDarrick J. Wong iomap_is_partially_uptodate(struct page *page, unsigned long from, 463afc51aaaSDarrick J. Wong unsigned long count) 464afc51aaaSDarrick J. Wong { 465afc51aaaSDarrick J. Wong struct iomap_page *iop = to_iomap_page(page); 466afc51aaaSDarrick J. Wong struct inode *inode = page->mapping->host; 467afc51aaaSDarrick J. Wong unsigned len, first, last; 468afc51aaaSDarrick J. Wong unsigned i; 469afc51aaaSDarrick J. Wong 470afc51aaaSDarrick J. Wong /* Limit range to one page */ 471afc51aaaSDarrick J. Wong len = min_t(unsigned, PAGE_SIZE - from, count); 472afc51aaaSDarrick J. Wong 473afc51aaaSDarrick J. Wong /* First and last blocks in range within page */ 474afc51aaaSDarrick J. Wong first = from >> inode->i_blkbits; 475afc51aaaSDarrick J. Wong last = (from + len - 1) >> inode->i_blkbits; 476afc51aaaSDarrick J. Wong 477afc51aaaSDarrick J. Wong if (iop) { 478afc51aaaSDarrick J. Wong for (i = first; i <= last; i++) 479afc51aaaSDarrick J. Wong if (!test_bit(i, iop->uptodate)) 480afc51aaaSDarrick J. Wong return 0; 481afc51aaaSDarrick J. Wong return 1; 482afc51aaaSDarrick J. Wong } 483afc51aaaSDarrick J. Wong 484afc51aaaSDarrick J. Wong return 0; 485afc51aaaSDarrick J. Wong } 486afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate); 487afc51aaaSDarrick J. Wong 488afc51aaaSDarrick J. Wong int 489afc51aaaSDarrick J. Wong iomap_releasepage(struct page *page, gfp_t gfp_mask) 490afc51aaaSDarrick J. Wong { 4919e91c572SChristoph Hellwig trace_iomap_releasepage(page->mapping->host, page, 0, 0); 4929e91c572SChristoph Hellwig 493afc51aaaSDarrick J. Wong /* 494afc51aaaSDarrick J. Wong * mm accommodates an old ext3 case where clean pages might not have had 495afc51aaaSDarrick J. Wong * the dirty bit cleared. Thus, it can send actual dirty pages to 496afc51aaaSDarrick J. Wong * ->releasepage() via shrink_active_list(), skip those here. 497afc51aaaSDarrick J. Wong */ 498afc51aaaSDarrick J. Wong if (PageDirty(page) || PageWriteback(page)) 499afc51aaaSDarrick J. Wong return 0; 500afc51aaaSDarrick J. Wong iomap_page_release(page); 501afc51aaaSDarrick J. Wong return 1; 502afc51aaaSDarrick J. Wong } 503afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_releasepage); 504afc51aaaSDarrick J. Wong 505afc51aaaSDarrick J. Wong void 506afc51aaaSDarrick J. Wong iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len) 507afc51aaaSDarrick J. Wong { 5089e91c572SChristoph Hellwig trace_iomap_invalidatepage(page->mapping->host, page, offset, len); 5099e91c572SChristoph Hellwig 510afc51aaaSDarrick J. Wong /* 511afc51aaaSDarrick J. Wong * If we are invalidating the entire page, clear the dirty state from it 512afc51aaaSDarrick J. Wong * and release it to avoid unnecessary buildup of the LRU. 513afc51aaaSDarrick J. Wong */ 514afc51aaaSDarrick J. Wong if (offset == 0 && len == PAGE_SIZE) { 515afc51aaaSDarrick J. Wong WARN_ON_ONCE(PageWriteback(page)); 516afc51aaaSDarrick J. Wong cancel_dirty_page(page); 517afc51aaaSDarrick J. Wong iomap_page_release(page); 518afc51aaaSDarrick J. Wong } 519afc51aaaSDarrick J. Wong } 520afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_invalidatepage); 521afc51aaaSDarrick J. Wong 522afc51aaaSDarrick J. Wong #ifdef CONFIG_MIGRATION 523afc51aaaSDarrick J. Wong int 524afc51aaaSDarrick J. Wong iomap_migrate_page(struct address_space *mapping, struct page *newpage, 525afc51aaaSDarrick J. Wong struct page *page, enum migrate_mode mode) 526afc51aaaSDarrick J. Wong { 527afc51aaaSDarrick J. Wong int ret; 528afc51aaaSDarrick J. Wong 52926473f83SLinus Torvalds ret = migrate_page_move_mapping(mapping, newpage, page, 0); 530afc51aaaSDarrick J. Wong if (ret != MIGRATEPAGE_SUCCESS) 531afc51aaaSDarrick J. Wong return ret; 532afc51aaaSDarrick J. Wong 533afc51aaaSDarrick J. Wong if (page_has_private(page)) { 534afc51aaaSDarrick J. Wong ClearPagePrivate(page); 535afc51aaaSDarrick J. Wong get_page(newpage); 536afc51aaaSDarrick J. Wong set_page_private(newpage, page_private(page)); 537afc51aaaSDarrick J. Wong set_page_private(page, 0); 538afc51aaaSDarrick J. Wong put_page(page); 539afc51aaaSDarrick J. Wong SetPagePrivate(newpage); 540afc51aaaSDarrick J. Wong } 541afc51aaaSDarrick J. Wong 542afc51aaaSDarrick J. Wong if (mode != MIGRATE_SYNC_NO_COPY) 543afc51aaaSDarrick J. Wong migrate_page_copy(newpage, page); 544afc51aaaSDarrick J. Wong else 545afc51aaaSDarrick J. Wong migrate_page_states(newpage, page); 546afc51aaaSDarrick J. Wong return MIGRATEPAGE_SUCCESS; 547afc51aaaSDarrick J. Wong } 548afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_migrate_page); 549afc51aaaSDarrick J. Wong #endif /* CONFIG_MIGRATION */ 550afc51aaaSDarrick J. Wong 551afc51aaaSDarrick J. Wong static void 552afc51aaaSDarrick J. Wong iomap_write_failed(struct inode *inode, loff_t pos, unsigned len) 553afc51aaaSDarrick J. Wong { 554afc51aaaSDarrick J. Wong loff_t i_size = i_size_read(inode); 555afc51aaaSDarrick J. Wong 556afc51aaaSDarrick J. Wong /* 557afc51aaaSDarrick J. Wong * Only truncate newly allocated pages beyoned EOF, even if the 558afc51aaaSDarrick J. Wong * write started inside the existing inode size. 559afc51aaaSDarrick J. Wong */ 560afc51aaaSDarrick J. Wong if (pos + len > i_size) 561afc51aaaSDarrick J. Wong truncate_pagecache_range(inode, max(pos, i_size), pos + len); 562afc51aaaSDarrick J. Wong } 563afc51aaaSDarrick J. Wong 564afc51aaaSDarrick J. Wong static int 565afc51aaaSDarrick J. Wong iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page, 566afc51aaaSDarrick J. Wong unsigned poff, unsigned plen, unsigned from, unsigned to, 567afc51aaaSDarrick J. Wong struct iomap *iomap) 568afc51aaaSDarrick J. Wong { 569afc51aaaSDarrick J. Wong struct bio_vec bvec; 570afc51aaaSDarrick J. Wong struct bio bio; 571afc51aaaSDarrick J. Wong 572009d8d84SChristoph Hellwig if (iomap_block_needs_zeroing(inode, iomap, block_start)) { 573afc51aaaSDarrick J. Wong zero_user_segments(page, poff, from, to, poff + plen); 574afc51aaaSDarrick J. Wong iomap_set_range_uptodate(page, poff, plen); 575afc51aaaSDarrick J. Wong return 0; 576afc51aaaSDarrick J. Wong } 577afc51aaaSDarrick J. Wong 578afc51aaaSDarrick J. Wong bio_init(&bio, &bvec, 1); 579afc51aaaSDarrick J. Wong bio.bi_opf = REQ_OP_READ; 580afc51aaaSDarrick J. Wong bio.bi_iter.bi_sector = iomap_sector(iomap, block_start); 581afc51aaaSDarrick J. Wong bio_set_dev(&bio, iomap->bdev); 582afc51aaaSDarrick J. Wong __bio_add_page(&bio, page, plen, poff); 583afc51aaaSDarrick J. Wong return submit_bio_wait(&bio); 584afc51aaaSDarrick J. Wong } 585afc51aaaSDarrick J. Wong 586afc51aaaSDarrick J. Wong static int 587afc51aaaSDarrick J. Wong __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, 588afc51aaaSDarrick J. Wong struct page *page, struct iomap *iomap) 589afc51aaaSDarrick J. Wong { 590afc51aaaSDarrick J. Wong struct iomap_page *iop = iomap_page_create(inode, page); 591afc51aaaSDarrick J. Wong loff_t block_size = i_blocksize(inode); 592afc51aaaSDarrick J. Wong loff_t block_start = pos & ~(block_size - 1); 593afc51aaaSDarrick J. Wong loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1); 594afc51aaaSDarrick J. Wong unsigned from = offset_in_page(pos), to = from + len, poff, plen; 595afc51aaaSDarrick J. Wong int status = 0; 596afc51aaaSDarrick J. Wong 597afc51aaaSDarrick J. Wong if (PageUptodate(page)) 598afc51aaaSDarrick J. Wong return 0; 599afc51aaaSDarrick J. Wong 600afc51aaaSDarrick J. Wong do { 601afc51aaaSDarrick J. Wong iomap_adjust_read_range(inode, iop, &block_start, 602afc51aaaSDarrick J. Wong block_end - block_start, &poff, &plen); 603afc51aaaSDarrick J. Wong if (plen == 0) 604afc51aaaSDarrick J. Wong break; 605afc51aaaSDarrick J. Wong 606afc51aaaSDarrick J. Wong if ((from > poff && from < poff + plen) || 607afc51aaaSDarrick J. Wong (to > poff && to < poff + plen)) { 608afc51aaaSDarrick J. Wong status = iomap_read_page_sync(inode, block_start, page, 609afc51aaaSDarrick J. Wong poff, plen, from, to, iomap); 610afc51aaaSDarrick J. Wong if (status) 611afc51aaaSDarrick J. Wong break; 612afc51aaaSDarrick J. Wong } 613afc51aaaSDarrick J. Wong 614afc51aaaSDarrick J. Wong } while ((block_start += plen) < block_end); 615afc51aaaSDarrick J. Wong 616afc51aaaSDarrick J. Wong return status; 617afc51aaaSDarrick J. Wong } 618afc51aaaSDarrick J. Wong 619afc51aaaSDarrick J. Wong static int 620afc51aaaSDarrick J. Wong iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags, 621afc51aaaSDarrick J. Wong struct page **pagep, struct iomap *iomap) 622afc51aaaSDarrick J. Wong { 623afc51aaaSDarrick J. Wong const struct iomap_page_ops *page_ops = iomap->page_ops; 624afc51aaaSDarrick J. Wong struct page *page; 625afc51aaaSDarrick J. Wong int status = 0; 626afc51aaaSDarrick J. Wong 627afc51aaaSDarrick J. Wong BUG_ON(pos + len > iomap->offset + iomap->length); 628afc51aaaSDarrick J. Wong 629afc51aaaSDarrick J. Wong if (fatal_signal_pending(current)) 630afc51aaaSDarrick J. Wong return -EINTR; 631afc51aaaSDarrick J. Wong 632afc51aaaSDarrick J. Wong if (page_ops && page_ops->page_prepare) { 633afc51aaaSDarrick J. Wong status = page_ops->page_prepare(inode, pos, len, iomap); 634afc51aaaSDarrick J. Wong if (status) 635afc51aaaSDarrick J. Wong return status; 636afc51aaaSDarrick J. Wong } 637afc51aaaSDarrick J. Wong 638dcd6158dSChristoph Hellwig page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT, 639dcd6158dSChristoph Hellwig AOP_FLAG_NOFS); 640afc51aaaSDarrick J. Wong if (!page) { 641afc51aaaSDarrick J. Wong status = -ENOMEM; 642afc51aaaSDarrick J. Wong goto out_no_page; 643afc51aaaSDarrick J. Wong } 644afc51aaaSDarrick J. Wong 645afc51aaaSDarrick J. Wong if (iomap->type == IOMAP_INLINE) 646afc51aaaSDarrick J. Wong iomap_read_inline_data(inode, page, iomap); 647afc51aaaSDarrick J. Wong else if (iomap->flags & IOMAP_F_BUFFER_HEAD) 648afc51aaaSDarrick J. Wong status = __block_write_begin_int(page, pos, len, NULL, iomap); 649afc51aaaSDarrick J. Wong else 650afc51aaaSDarrick J. Wong status = __iomap_write_begin(inode, pos, len, page, iomap); 651afc51aaaSDarrick J. Wong 652afc51aaaSDarrick J. Wong if (unlikely(status)) 653afc51aaaSDarrick J. Wong goto out_unlock; 654afc51aaaSDarrick J. Wong 655afc51aaaSDarrick J. Wong *pagep = page; 656afc51aaaSDarrick J. Wong return 0; 657afc51aaaSDarrick J. Wong 658afc51aaaSDarrick J. Wong out_unlock: 659afc51aaaSDarrick J. Wong unlock_page(page); 660afc51aaaSDarrick J. Wong put_page(page); 661afc51aaaSDarrick J. Wong iomap_write_failed(inode, pos, len); 662afc51aaaSDarrick J. Wong 663afc51aaaSDarrick J. Wong out_no_page: 664afc51aaaSDarrick J. Wong if (page_ops && page_ops->page_done) 665afc51aaaSDarrick J. Wong page_ops->page_done(inode, pos, 0, NULL, iomap); 666afc51aaaSDarrick J. Wong return status; 667afc51aaaSDarrick J. Wong } 668afc51aaaSDarrick J. Wong 669afc51aaaSDarrick J. Wong int 670afc51aaaSDarrick J. Wong iomap_set_page_dirty(struct page *page) 671afc51aaaSDarrick J. Wong { 672afc51aaaSDarrick J. Wong struct address_space *mapping = page_mapping(page); 673afc51aaaSDarrick J. Wong int newly_dirty; 674afc51aaaSDarrick J. Wong 675afc51aaaSDarrick J. Wong if (unlikely(!mapping)) 676afc51aaaSDarrick J. Wong return !TestSetPageDirty(page); 677afc51aaaSDarrick J. Wong 678afc51aaaSDarrick J. Wong /* 679afc51aaaSDarrick J. Wong * Lock out page->mem_cgroup migration to keep PageDirty 680afc51aaaSDarrick J. Wong * synchronized with per-memcg dirty page counters. 681afc51aaaSDarrick J. Wong */ 682afc51aaaSDarrick J. Wong lock_page_memcg(page); 683afc51aaaSDarrick J. Wong newly_dirty = !TestSetPageDirty(page); 684afc51aaaSDarrick J. Wong if (newly_dirty) 685afc51aaaSDarrick J. Wong __set_page_dirty(page, mapping, 0); 686afc51aaaSDarrick J. Wong unlock_page_memcg(page); 687afc51aaaSDarrick J. Wong 688afc51aaaSDarrick J. Wong if (newly_dirty) 689afc51aaaSDarrick J. Wong __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 690afc51aaaSDarrick J. Wong return newly_dirty; 691afc51aaaSDarrick J. Wong } 692afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_set_page_dirty); 693afc51aaaSDarrick J. Wong 694afc51aaaSDarrick J. Wong static int 695afc51aaaSDarrick J. Wong __iomap_write_end(struct inode *inode, loff_t pos, unsigned len, 696c12d6fa8SChristoph Hellwig unsigned copied, struct page *page) 697afc51aaaSDarrick J. Wong { 698afc51aaaSDarrick J. Wong flush_dcache_page(page); 699afc51aaaSDarrick J. Wong 700afc51aaaSDarrick J. Wong /* 701afc51aaaSDarrick J. Wong * The blocks that were entirely written will now be uptodate, so we 702afc51aaaSDarrick J. Wong * don't have to worry about a readpage reading them and overwriting a 703afc51aaaSDarrick J. Wong * partial write. However if we have encountered a short write and only 704afc51aaaSDarrick J. Wong * partially written into a block, it will not be marked uptodate, so a 705afc51aaaSDarrick J. Wong * readpage might come in and destroy our partial write. 706afc51aaaSDarrick J. Wong * 707afc51aaaSDarrick J. Wong * Do the simplest thing, and just treat any short write to a non 708afc51aaaSDarrick J. Wong * uptodate page as a zero-length write, and force the caller to redo 709afc51aaaSDarrick J. Wong * the whole thing. 710afc51aaaSDarrick J. Wong */ 711afc51aaaSDarrick J. Wong if (unlikely(copied < len && !PageUptodate(page))) 712afc51aaaSDarrick J. Wong return 0; 713afc51aaaSDarrick J. Wong iomap_set_range_uptodate(page, offset_in_page(pos), len); 714afc51aaaSDarrick J. Wong iomap_set_page_dirty(page); 715afc51aaaSDarrick J. Wong return copied; 716afc51aaaSDarrick J. Wong } 717afc51aaaSDarrick J. Wong 718afc51aaaSDarrick J. Wong static int 719afc51aaaSDarrick J. Wong iomap_write_end_inline(struct inode *inode, struct page *page, 720afc51aaaSDarrick J. Wong struct iomap *iomap, loff_t pos, unsigned copied) 721afc51aaaSDarrick J. Wong { 722afc51aaaSDarrick J. Wong void *addr; 723afc51aaaSDarrick J. Wong 724afc51aaaSDarrick J. Wong WARN_ON_ONCE(!PageUptodate(page)); 725afc51aaaSDarrick J. Wong BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data)); 726afc51aaaSDarrick J. Wong 727afc51aaaSDarrick J. Wong addr = kmap_atomic(page); 728afc51aaaSDarrick J. Wong memcpy(iomap->inline_data + pos, addr + pos, copied); 729afc51aaaSDarrick J. Wong kunmap_atomic(addr); 730afc51aaaSDarrick J. Wong 731afc51aaaSDarrick J. Wong mark_inode_dirty(inode); 732afc51aaaSDarrick J. Wong return copied; 733afc51aaaSDarrick J. Wong } 734afc51aaaSDarrick J. Wong 735afc51aaaSDarrick J. Wong static int 736afc51aaaSDarrick J. Wong iomap_write_end(struct inode *inode, loff_t pos, unsigned len, 737afc51aaaSDarrick J. Wong unsigned copied, struct page *page, struct iomap *iomap) 738afc51aaaSDarrick J. Wong { 739afc51aaaSDarrick J. Wong const struct iomap_page_ops *page_ops = iomap->page_ops; 740afc51aaaSDarrick J. Wong loff_t old_size = inode->i_size; 741afc51aaaSDarrick J. Wong int ret; 742afc51aaaSDarrick J. Wong 743afc51aaaSDarrick J. Wong if (iomap->type == IOMAP_INLINE) { 744afc51aaaSDarrick J. Wong ret = iomap_write_end_inline(inode, page, iomap, pos, copied); 745afc51aaaSDarrick J. Wong } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) { 746afc51aaaSDarrick J. Wong ret = block_write_end(NULL, inode->i_mapping, pos, len, copied, 747afc51aaaSDarrick J. Wong page, NULL); 748afc51aaaSDarrick J. Wong } else { 749c12d6fa8SChristoph Hellwig ret = __iomap_write_end(inode, pos, len, copied, page); 750afc51aaaSDarrick J. Wong } 751afc51aaaSDarrick J. Wong 752afc51aaaSDarrick J. Wong /* 753afc51aaaSDarrick J. Wong * Update the in-memory inode size after copying the data into the page 754afc51aaaSDarrick J. Wong * cache. It's up to the file system to write the updated size to disk, 755afc51aaaSDarrick J. Wong * preferably after I/O completion so that no stale data is exposed. 756afc51aaaSDarrick J. Wong */ 757afc51aaaSDarrick J. Wong if (pos + ret > old_size) { 758afc51aaaSDarrick J. Wong i_size_write(inode, pos + ret); 759afc51aaaSDarrick J. Wong iomap->flags |= IOMAP_F_SIZE_CHANGED; 760afc51aaaSDarrick J. Wong } 761afc51aaaSDarrick J. Wong unlock_page(page); 762afc51aaaSDarrick J. Wong 763afc51aaaSDarrick J. Wong if (old_size < pos) 764afc51aaaSDarrick J. Wong pagecache_isize_extended(inode, old_size, pos); 765afc51aaaSDarrick J. Wong if (page_ops && page_ops->page_done) 766afc51aaaSDarrick J. Wong page_ops->page_done(inode, pos, ret, page, iomap); 767afc51aaaSDarrick J. Wong put_page(page); 768afc51aaaSDarrick J. Wong 769afc51aaaSDarrick J. Wong if (ret < len) 770afc51aaaSDarrick J. Wong iomap_write_failed(inode, pos, len); 771afc51aaaSDarrick J. Wong return ret; 772afc51aaaSDarrick J. Wong } 773afc51aaaSDarrick J. Wong 774afc51aaaSDarrick J. Wong static loff_t 775afc51aaaSDarrick J. Wong iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data, 776afc51aaaSDarrick J. Wong struct iomap *iomap) 777afc51aaaSDarrick J. Wong { 778afc51aaaSDarrick J. Wong struct iov_iter *i = data; 779afc51aaaSDarrick J. Wong long status = 0; 780afc51aaaSDarrick J. Wong ssize_t written = 0; 781afc51aaaSDarrick J. Wong 782afc51aaaSDarrick J. Wong do { 783afc51aaaSDarrick J. Wong struct page *page; 784afc51aaaSDarrick J. Wong unsigned long offset; /* Offset into pagecache page */ 785afc51aaaSDarrick J. Wong unsigned long bytes; /* Bytes to write to page */ 786afc51aaaSDarrick J. Wong size_t copied; /* Bytes copied from user */ 787afc51aaaSDarrick J. Wong 788afc51aaaSDarrick J. Wong offset = offset_in_page(pos); 789afc51aaaSDarrick J. Wong bytes = min_t(unsigned long, PAGE_SIZE - offset, 790afc51aaaSDarrick J. Wong iov_iter_count(i)); 791afc51aaaSDarrick J. Wong again: 792afc51aaaSDarrick J. Wong if (bytes > length) 793afc51aaaSDarrick J. Wong bytes = length; 794afc51aaaSDarrick J. Wong 795afc51aaaSDarrick J. Wong /* 796afc51aaaSDarrick J. Wong * Bring in the user page that we will copy from _first_. 797afc51aaaSDarrick J. Wong * Otherwise there's a nasty deadlock on copying from the 798afc51aaaSDarrick J. Wong * same page as we're writing to, without it being marked 799afc51aaaSDarrick J. Wong * up-to-date. 800afc51aaaSDarrick J. Wong * 801afc51aaaSDarrick J. Wong * Not only is this an optimisation, but it is also required 802afc51aaaSDarrick J. Wong * to check that the address is actually valid, when atomic 803afc51aaaSDarrick J. Wong * usercopies are used, below. 804afc51aaaSDarrick J. Wong */ 805afc51aaaSDarrick J. Wong if (unlikely(iov_iter_fault_in_readable(i, bytes))) { 806afc51aaaSDarrick J. Wong status = -EFAULT; 807afc51aaaSDarrick J. Wong break; 808afc51aaaSDarrick J. Wong } 809afc51aaaSDarrick J. Wong 810dcd6158dSChristoph Hellwig status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap); 811afc51aaaSDarrick J. Wong if (unlikely(status)) 812afc51aaaSDarrick J. Wong break; 813afc51aaaSDarrick J. Wong 814afc51aaaSDarrick J. Wong if (mapping_writably_mapped(inode->i_mapping)) 815afc51aaaSDarrick J. Wong flush_dcache_page(page); 816afc51aaaSDarrick J. Wong 817afc51aaaSDarrick J. Wong copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); 818afc51aaaSDarrick J. Wong 819afc51aaaSDarrick J. Wong flush_dcache_page(page); 820afc51aaaSDarrick J. Wong 821afc51aaaSDarrick J. Wong status = iomap_write_end(inode, pos, bytes, copied, page, 822afc51aaaSDarrick J. Wong iomap); 823afc51aaaSDarrick J. Wong if (unlikely(status < 0)) 824afc51aaaSDarrick J. Wong break; 825afc51aaaSDarrick J. Wong copied = status; 826afc51aaaSDarrick J. Wong 827afc51aaaSDarrick J. Wong cond_resched(); 828afc51aaaSDarrick J. Wong 829afc51aaaSDarrick J. Wong iov_iter_advance(i, copied); 830afc51aaaSDarrick J. Wong if (unlikely(copied == 0)) { 831afc51aaaSDarrick J. Wong /* 832afc51aaaSDarrick J. Wong * If we were unable to copy any data at all, we must 833afc51aaaSDarrick J. Wong * fall back to a single segment length write. 834afc51aaaSDarrick J. Wong * 835afc51aaaSDarrick J. Wong * If we didn't fallback here, we could livelock 836afc51aaaSDarrick J. Wong * because not all segments in the iov can be copied at 837afc51aaaSDarrick J. Wong * once without a pagefault. 838afc51aaaSDarrick J. Wong */ 839afc51aaaSDarrick J. Wong bytes = min_t(unsigned long, PAGE_SIZE - offset, 840afc51aaaSDarrick J. Wong iov_iter_single_seg_count(i)); 841afc51aaaSDarrick J. Wong goto again; 842afc51aaaSDarrick J. Wong } 843afc51aaaSDarrick J. Wong pos += copied; 844afc51aaaSDarrick J. Wong written += copied; 845afc51aaaSDarrick J. Wong length -= copied; 846afc51aaaSDarrick J. Wong 847afc51aaaSDarrick J. Wong balance_dirty_pages_ratelimited(inode->i_mapping); 848afc51aaaSDarrick J. Wong } while (iov_iter_count(i) && length); 849afc51aaaSDarrick J. Wong 850afc51aaaSDarrick J. Wong return written ? written : status; 851afc51aaaSDarrick J. Wong } 852afc51aaaSDarrick J. Wong 853afc51aaaSDarrick J. Wong ssize_t 854afc51aaaSDarrick J. Wong iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter, 855afc51aaaSDarrick J. Wong const struct iomap_ops *ops) 856afc51aaaSDarrick J. Wong { 857afc51aaaSDarrick J. Wong struct inode *inode = iocb->ki_filp->f_mapping->host; 858afc51aaaSDarrick J. Wong loff_t pos = iocb->ki_pos, ret = 0, written = 0; 859afc51aaaSDarrick J. Wong 860afc51aaaSDarrick J. Wong while (iov_iter_count(iter)) { 861afc51aaaSDarrick J. Wong ret = iomap_apply(inode, pos, iov_iter_count(iter), 862afc51aaaSDarrick J. Wong IOMAP_WRITE, ops, iter, iomap_write_actor); 863afc51aaaSDarrick J. Wong if (ret <= 0) 864afc51aaaSDarrick J. Wong break; 865afc51aaaSDarrick J. Wong pos += ret; 866afc51aaaSDarrick J. Wong written += ret; 867afc51aaaSDarrick J. Wong } 868afc51aaaSDarrick J. Wong 869afc51aaaSDarrick J. Wong return written ? written : ret; 870afc51aaaSDarrick J. Wong } 871afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_file_buffered_write); 872afc51aaaSDarrick J. Wong 873afc51aaaSDarrick J. Wong static struct page * 874afc51aaaSDarrick J. Wong __iomap_read_page(struct inode *inode, loff_t offset) 875afc51aaaSDarrick J. Wong { 876afc51aaaSDarrick J. Wong struct address_space *mapping = inode->i_mapping; 877afc51aaaSDarrick J. Wong struct page *page; 878afc51aaaSDarrick J. Wong 879afc51aaaSDarrick J. Wong page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL); 880afc51aaaSDarrick J. Wong if (IS_ERR(page)) 881afc51aaaSDarrick J. Wong return page; 882afc51aaaSDarrick J. Wong if (!PageUptodate(page)) { 883afc51aaaSDarrick J. Wong put_page(page); 884afc51aaaSDarrick J. Wong return ERR_PTR(-EIO); 885afc51aaaSDarrick J. Wong } 886afc51aaaSDarrick J. Wong return page; 887afc51aaaSDarrick J. Wong } 888afc51aaaSDarrick J. Wong 889afc51aaaSDarrick J. Wong static loff_t 890*3590c4d8SChristoph Hellwig iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data, 891afc51aaaSDarrick J. Wong struct iomap *iomap) 892afc51aaaSDarrick J. Wong { 893afc51aaaSDarrick J. Wong long status = 0; 894afc51aaaSDarrick J. Wong ssize_t written = 0; 895afc51aaaSDarrick J. Wong 896*3590c4d8SChristoph Hellwig /* don't bother with blocks that are not shared to start with */ 897*3590c4d8SChristoph Hellwig if (!(iomap->flags & IOMAP_F_SHARED)) 898*3590c4d8SChristoph Hellwig return length; 899*3590c4d8SChristoph Hellwig /* don't bother with holes or unwritten extents */ 900*3590c4d8SChristoph Hellwig if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) 901*3590c4d8SChristoph Hellwig return length; 902*3590c4d8SChristoph Hellwig 903afc51aaaSDarrick J. Wong do { 904afc51aaaSDarrick J. Wong struct page *page, *rpage; 905afc51aaaSDarrick J. Wong unsigned long offset; /* Offset into pagecache page */ 906afc51aaaSDarrick J. Wong unsigned long bytes; /* Bytes to write to page */ 907afc51aaaSDarrick J. Wong 908afc51aaaSDarrick J. Wong offset = offset_in_page(pos); 909afc51aaaSDarrick J. Wong bytes = min_t(loff_t, PAGE_SIZE - offset, length); 910afc51aaaSDarrick J. Wong 911afc51aaaSDarrick J. Wong rpage = __iomap_read_page(inode, pos); 912afc51aaaSDarrick J. Wong if (IS_ERR(rpage)) 913afc51aaaSDarrick J. Wong return PTR_ERR(rpage); 914afc51aaaSDarrick J. Wong 915dcd6158dSChristoph Hellwig status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap); 916afc51aaaSDarrick J. Wong put_page(rpage); 917afc51aaaSDarrick J. Wong if (unlikely(status)) 918afc51aaaSDarrick J. Wong return status; 919afc51aaaSDarrick J. Wong 920afc51aaaSDarrick J. Wong WARN_ON_ONCE(!PageUptodate(page)); 921afc51aaaSDarrick J. Wong 922afc51aaaSDarrick J. Wong status = iomap_write_end(inode, pos, bytes, bytes, page, iomap); 923afc51aaaSDarrick J. Wong if (unlikely(status <= 0)) { 924afc51aaaSDarrick J. Wong if (WARN_ON_ONCE(status == 0)) 925afc51aaaSDarrick J. Wong return -EIO; 926afc51aaaSDarrick J. Wong return status; 927afc51aaaSDarrick J. Wong } 928afc51aaaSDarrick J. Wong 929afc51aaaSDarrick J. Wong cond_resched(); 930afc51aaaSDarrick J. Wong 931afc51aaaSDarrick J. Wong pos += status; 932afc51aaaSDarrick J. Wong written += status; 933afc51aaaSDarrick J. Wong length -= status; 934afc51aaaSDarrick J. Wong 935afc51aaaSDarrick J. Wong balance_dirty_pages_ratelimited(inode->i_mapping); 936afc51aaaSDarrick J. Wong } while (length); 937afc51aaaSDarrick J. Wong 938afc51aaaSDarrick J. Wong return written; 939afc51aaaSDarrick J. Wong } 940afc51aaaSDarrick J. Wong 941afc51aaaSDarrick J. Wong int 942*3590c4d8SChristoph Hellwig iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, 943afc51aaaSDarrick J. Wong const struct iomap_ops *ops) 944afc51aaaSDarrick J. Wong { 945afc51aaaSDarrick J. Wong loff_t ret; 946afc51aaaSDarrick J. Wong 947afc51aaaSDarrick J. Wong while (len) { 948afc51aaaSDarrick J. Wong ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL, 949*3590c4d8SChristoph Hellwig iomap_unshare_actor); 950afc51aaaSDarrick J. Wong if (ret <= 0) 951afc51aaaSDarrick J. Wong return ret; 952afc51aaaSDarrick J. Wong pos += ret; 953afc51aaaSDarrick J. Wong len -= ret; 954afc51aaaSDarrick J. Wong } 955afc51aaaSDarrick J. Wong 956afc51aaaSDarrick J. Wong return 0; 957afc51aaaSDarrick J. Wong } 958*3590c4d8SChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_file_unshare); 959afc51aaaSDarrick J. Wong 960afc51aaaSDarrick J. Wong static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset, 961afc51aaaSDarrick J. Wong unsigned bytes, struct iomap *iomap) 962afc51aaaSDarrick J. Wong { 963afc51aaaSDarrick J. Wong struct page *page; 964afc51aaaSDarrick J. Wong int status; 965afc51aaaSDarrick J. Wong 966dcd6158dSChristoph Hellwig status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap); 967afc51aaaSDarrick J. Wong if (status) 968afc51aaaSDarrick J. Wong return status; 969afc51aaaSDarrick J. Wong 970afc51aaaSDarrick J. Wong zero_user(page, offset, bytes); 971afc51aaaSDarrick J. Wong mark_page_accessed(page); 972afc51aaaSDarrick J. Wong 973afc51aaaSDarrick J. Wong return iomap_write_end(inode, pos, bytes, bytes, page, iomap); 974afc51aaaSDarrick J. Wong } 975afc51aaaSDarrick J. Wong 976afc51aaaSDarrick J. Wong static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes, 977afc51aaaSDarrick J. Wong struct iomap *iomap) 978afc51aaaSDarrick J. Wong { 979afc51aaaSDarrick J. Wong return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, 980afc51aaaSDarrick J. Wong iomap_sector(iomap, pos & PAGE_MASK), offset, bytes); 981afc51aaaSDarrick J. Wong } 982afc51aaaSDarrick J. Wong 983afc51aaaSDarrick J. Wong static loff_t 984afc51aaaSDarrick J. Wong iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count, 985afc51aaaSDarrick J. Wong void *data, struct iomap *iomap) 986afc51aaaSDarrick J. Wong { 987afc51aaaSDarrick J. Wong bool *did_zero = data; 988afc51aaaSDarrick J. Wong loff_t written = 0; 989afc51aaaSDarrick J. Wong int status; 990afc51aaaSDarrick J. Wong 991afc51aaaSDarrick J. Wong /* already zeroed? we're done. */ 992afc51aaaSDarrick J. Wong if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) 993afc51aaaSDarrick J. Wong return count; 994afc51aaaSDarrick J. Wong 995afc51aaaSDarrick J. Wong do { 996afc51aaaSDarrick J. Wong unsigned offset, bytes; 997afc51aaaSDarrick J. Wong 998afc51aaaSDarrick J. Wong offset = offset_in_page(pos); 999afc51aaaSDarrick J. Wong bytes = min_t(loff_t, PAGE_SIZE - offset, count); 1000afc51aaaSDarrick J. Wong 1001afc51aaaSDarrick J. Wong if (IS_DAX(inode)) 1002afc51aaaSDarrick J. Wong status = iomap_dax_zero(pos, offset, bytes, iomap); 1003afc51aaaSDarrick J. Wong else 1004afc51aaaSDarrick J. Wong status = iomap_zero(inode, pos, offset, bytes, iomap); 1005afc51aaaSDarrick J. Wong if (status < 0) 1006afc51aaaSDarrick J. Wong return status; 1007afc51aaaSDarrick J. Wong 1008afc51aaaSDarrick J. Wong pos += bytes; 1009afc51aaaSDarrick J. Wong count -= bytes; 1010afc51aaaSDarrick J. Wong written += bytes; 1011afc51aaaSDarrick J. Wong if (did_zero) 1012afc51aaaSDarrick J. Wong *did_zero = true; 1013afc51aaaSDarrick J. Wong } while (count > 0); 1014afc51aaaSDarrick J. Wong 1015afc51aaaSDarrick J. Wong return written; 1016afc51aaaSDarrick J. Wong } 1017afc51aaaSDarrick J. Wong 1018afc51aaaSDarrick J. Wong int 1019afc51aaaSDarrick J. Wong iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, 1020afc51aaaSDarrick J. Wong const struct iomap_ops *ops) 1021afc51aaaSDarrick J. Wong { 1022afc51aaaSDarrick J. Wong loff_t ret; 1023afc51aaaSDarrick J. Wong 1024afc51aaaSDarrick J. Wong while (len > 0) { 1025afc51aaaSDarrick J. Wong ret = iomap_apply(inode, pos, len, IOMAP_ZERO, 1026afc51aaaSDarrick J. Wong ops, did_zero, iomap_zero_range_actor); 1027afc51aaaSDarrick J. Wong if (ret <= 0) 1028afc51aaaSDarrick J. Wong return ret; 1029afc51aaaSDarrick J. Wong 1030afc51aaaSDarrick J. Wong pos += ret; 1031afc51aaaSDarrick J. Wong len -= ret; 1032afc51aaaSDarrick J. Wong } 1033afc51aaaSDarrick J. Wong 1034afc51aaaSDarrick J. Wong return 0; 1035afc51aaaSDarrick J. Wong } 1036afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_zero_range); 1037afc51aaaSDarrick J. Wong 1038afc51aaaSDarrick J. Wong int 1039afc51aaaSDarrick J. Wong iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, 1040afc51aaaSDarrick J. Wong const struct iomap_ops *ops) 1041afc51aaaSDarrick J. Wong { 1042afc51aaaSDarrick J. Wong unsigned int blocksize = i_blocksize(inode); 1043afc51aaaSDarrick J. Wong unsigned int off = pos & (blocksize - 1); 1044afc51aaaSDarrick J. Wong 1045afc51aaaSDarrick J. Wong /* Block boundary? Nothing to do */ 1046afc51aaaSDarrick J. Wong if (!off) 1047afc51aaaSDarrick J. Wong return 0; 1048afc51aaaSDarrick J. Wong return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops); 1049afc51aaaSDarrick J. Wong } 1050afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_truncate_page); 1051afc51aaaSDarrick J. Wong 1052afc51aaaSDarrick J. Wong static loff_t 1053afc51aaaSDarrick J. Wong iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length, 1054afc51aaaSDarrick J. Wong void *data, struct iomap *iomap) 1055afc51aaaSDarrick J. Wong { 1056afc51aaaSDarrick J. Wong struct page *page = data; 1057afc51aaaSDarrick J. Wong int ret; 1058afc51aaaSDarrick J. Wong 1059afc51aaaSDarrick J. Wong if (iomap->flags & IOMAP_F_BUFFER_HEAD) { 1060afc51aaaSDarrick J. Wong ret = __block_write_begin_int(page, pos, length, NULL, iomap); 1061afc51aaaSDarrick J. Wong if (ret) 1062afc51aaaSDarrick J. Wong return ret; 1063afc51aaaSDarrick J. Wong block_commit_write(page, 0, length); 1064afc51aaaSDarrick J. Wong } else { 1065afc51aaaSDarrick J. Wong WARN_ON_ONCE(!PageUptodate(page)); 1066afc51aaaSDarrick J. Wong iomap_page_create(inode, page); 1067afc51aaaSDarrick J. Wong set_page_dirty(page); 1068afc51aaaSDarrick J. Wong } 1069afc51aaaSDarrick J. Wong 1070afc51aaaSDarrick J. Wong return length; 1071afc51aaaSDarrick J. Wong } 1072afc51aaaSDarrick J. Wong 1073afc51aaaSDarrick J. Wong vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) 1074afc51aaaSDarrick J. Wong { 1075afc51aaaSDarrick J. Wong struct page *page = vmf->page; 1076afc51aaaSDarrick J. Wong struct inode *inode = file_inode(vmf->vma->vm_file); 1077afc51aaaSDarrick J. Wong unsigned long length; 1078afc51aaaSDarrick J. Wong loff_t offset, size; 1079afc51aaaSDarrick J. Wong ssize_t ret; 1080afc51aaaSDarrick J. Wong 1081afc51aaaSDarrick J. Wong lock_page(page); 1082afc51aaaSDarrick J. Wong size = i_size_read(inode); 1083afc51aaaSDarrick J. Wong if ((page->mapping != inode->i_mapping) || 1084afc51aaaSDarrick J. Wong (page_offset(page) > size)) { 1085afc51aaaSDarrick J. Wong /* We overload EFAULT to mean page got truncated */ 1086afc51aaaSDarrick J. Wong ret = -EFAULT; 1087afc51aaaSDarrick J. Wong goto out_unlock; 1088afc51aaaSDarrick J. Wong } 1089afc51aaaSDarrick J. Wong 1090afc51aaaSDarrick J. Wong /* page is wholly or partially inside EOF */ 1091afc51aaaSDarrick J. Wong if (((page->index + 1) << PAGE_SHIFT) > size) 1092afc51aaaSDarrick J. Wong length = offset_in_page(size); 1093afc51aaaSDarrick J. Wong else 1094afc51aaaSDarrick J. Wong length = PAGE_SIZE; 1095afc51aaaSDarrick J. Wong 1096afc51aaaSDarrick J. Wong offset = page_offset(page); 1097afc51aaaSDarrick J. Wong while (length > 0) { 1098afc51aaaSDarrick J. Wong ret = iomap_apply(inode, offset, length, 1099afc51aaaSDarrick J. Wong IOMAP_WRITE | IOMAP_FAULT, ops, page, 1100afc51aaaSDarrick J. Wong iomap_page_mkwrite_actor); 1101afc51aaaSDarrick J. Wong if (unlikely(ret <= 0)) 1102afc51aaaSDarrick J. Wong goto out_unlock; 1103afc51aaaSDarrick J. Wong offset += ret; 1104afc51aaaSDarrick J. Wong length -= ret; 1105afc51aaaSDarrick J. Wong } 1106afc51aaaSDarrick J. Wong 1107afc51aaaSDarrick J. Wong wait_for_stable_page(page); 1108afc51aaaSDarrick J. Wong return VM_FAULT_LOCKED; 1109afc51aaaSDarrick J. Wong out_unlock: 1110afc51aaaSDarrick J. Wong unlock_page(page); 1111afc51aaaSDarrick J. Wong return block_page_mkwrite_return(ret); 1112afc51aaaSDarrick J. Wong } 1113afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_page_mkwrite); 1114598ecfbaSChristoph Hellwig 1115598ecfbaSChristoph Hellwig static void 111648d64cd1SChristoph Hellwig iomap_finish_page_writeback(struct inode *inode, struct page *page, 1117598ecfbaSChristoph Hellwig int error) 1118598ecfbaSChristoph Hellwig { 111948d64cd1SChristoph Hellwig struct iomap_page *iop = to_iomap_page(page); 1120598ecfbaSChristoph Hellwig 1121598ecfbaSChristoph Hellwig if (error) { 112248d64cd1SChristoph Hellwig SetPageError(page); 1123598ecfbaSChristoph Hellwig mapping_set_error(inode->i_mapping, -EIO); 1124598ecfbaSChristoph Hellwig } 1125598ecfbaSChristoph Hellwig 1126598ecfbaSChristoph Hellwig WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop); 1127598ecfbaSChristoph Hellwig WARN_ON_ONCE(iop && atomic_read(&iop->write_count) <= 0); 1128598ecfbaSChristoph Hellwig 1129598ecfbaSChristoph Hellwig if (!iop || atomic_dec_and_test(&iop->write_count)) 113048d64cd1SChristoph Hellwig end_page_writeback(page); 1131598ecfbaSChristoph Hellwig } 1132598ecfbaSChristoph Hellwig 1133598ecfbaSChristoph Hellwig /* 1134598ecfbaSChristoph Hellwig * We're now finished for good with this ioend structure. Update the page 1135598ecfbaSChristoph Hellwig * state, release holds on bios, and finally free up memory. Do not use the 1136598ecfbaSChristoph Hellwig * ioend after this. 1137598ecfbaSChristoph Hellwig */ 1138598ecfbaSChristoph Hellwig static void 1139598ecfbaSChristoph Hellwig iomap_finish_ioend(struct iomap_ioend *ioend, int error) 1140598ecfbaSChristoph Hellwig { 1141598ecfbaSChristoph Hellwig struct inode *inode = ioend->io_inode; 1142598ecfbaSChristoph Hellwig struct bio *bio = &ioend->io_inline_bio; 1143598ecfbaSChristoph Hellwig struct bio *last = ioend->io_bio, *next; 1144598ecfbaSChristoph Hellwig u64 start = bio->bi_iter.bi_sector; 1145598ecfbaSChristoph Hellwig bool quiet = bio_flagged(bio, BIO_QUIET); 1146598ecfbaSChristoph Hellwig 1147598ecfbaSChristoph Hellwig for (bio = &ioend->io_inline_bio; bio; bio = next) { 1148598ecfbaSChristoph Hellwig struct bio_vec *bv; 1149598ecfbaSChristoph Hellwig struct bvec_iter_all iter_all; 1150598ecfbaSChristoph Hellwig 1151598ecfbaSChristoph Hellwig /* 1152598ecfbaSChristoph Hellwig * For the last bio, bi_private points to the ioend, so we 1153598ecfbaSChristoph Hellwig * need to explicitly end the iteration here. 1154598ecfbaSChristoph Hellwig */ 1155598ecfbaSChristoph Hellwig if (bio == last) 1156598ecfbaSChristoph Hellwig next = NULL; 1157598ecfbaSChristoph Hellwig else 1158598ecfbaSChristoph Hellwig next = bio->bi_private; 1159598ecfbaSChristoph Hellwig 1160598ecfbaSChristoph Hellwig /* walk each page on bio, ending page IO on them */ 1161598ecfbaSChristoph Hellwig bio_for_each_segment_all(bv, bio, iter_all) 116248d64cd1SChristoph Hellwig iomap_finish_page_writeback(inode, bv->bv_page, error); 1163598ecfbaSChristoph Hellwig bio_put(bio); 1164598ecfbaSChristoph Hellwig } 1165598ecfbaSChristoph Hellwig 1166598ecfbaSChristoph Hellwig if (unlikely(error && !quiet)) { 1167598ecfbaSChristoph Hellwig printk_ratelimited(KERN_ERR 11689cd0ed63SDarrick J. Wong "%s: writeback error on inode %lu, offset %lld, sector %llu", 11699cd0ed63SDarrick J. Wong inode->i_sb->s_id, inode->i_ino, ioend->io_offset, 11709cd0ed63SDarrick J. Wong start); 1171598ecfbaSChristoph Hellwig } 1172598ecfbaSChristoph Hellwig } 1173598ecfbaSChristoph Hellwig 1174598ecfbaSChristoph Hellwig void 1175598ecfbaSChristoph Hellwig iomap_finish_ioends(struct iomap_ioend *ioend, int error) 1176598ecfbaSChristoph Hellwig { 1177598ecfbaSChristoph Hellwig struct list_head tmp; 1178598ecfbaSChristoph Hellwig 1179598ecfbaSChristoph Hellwig list_replace_init(&ioend->io_list, &tmp); 1180598ecfbaSChristoph Hellwig iomap_finish_ioend(ioend, error); 1181598ecfbaSChristoph Hellwig 1182598ecfbaSChristoph Hellwig while (!list_empty(&tmp)) { 1183598ecfbaSChristoph Hellwig ioend = list_first_entry(&tmp, struct iomap_ioend, io_list); 1184598ecfbaSChristoph Hellwig list_del_init(&ioend->io_list); 1185598ecfbaSChristoph Hellwig iomap_finish_ioend(ioend, error); 1186598ecfbaSChristoph Hellwig } 1187598ecfbaSChristoph Hellwig } 1188598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_finish_ioends); 1189598ecfbaSChristoph Hellwig 1190598ecfbaSChristoph Hellwig /* 1191598ecfbaSChristoph Hellwig * We can merge two adjacent ioends if they have the same set of work to do. 1192598ecfbaSChristoph Hellwig */ 1193598ecfbaSChristoph Hellwig static bool 1194598ecfbaSChristoph Hellwig iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next) 1195598ecfbaSChristoph Hellwig { 1196598ecfbaSChristoph Hellwig if (ioend->io_bio->bi_status != next->io_bio->bi_status) 1197598ecfbaSChristoph Hellwig return false; 1198598ecfbaSChristoph Hellwig if ((ioend->io_flags & IOMAP_F_SHARED) ^ 1199598ecfbaSChristoph Hellwig (next->io_flags & IOMAP_F_SHARED)) 1200598ecfbaSChristoph Hellwig return false; 1201598ecfbaSChristoph Hellwig if ((ioend->io_type == IOMAP_UNWRITTEN) ^ 1202598ecfbaSChristoph Hellwig (next->io_type == IOMAP_UNWRITTEN)) 1203598ecfbaSChristoph Hellwig return false; 1204598ecfbaSChristoph Hellwig if (ioend->io_offset + ioend->io_size != next->io_offset) 1205598ecfbaSChristoph Hellwig return false; 1206598ecfbaSChristoph Hellwig return true; 1207598ecfbaSChristoph Hellwig } 1208598ecfbaSChristoph Hellwig 1209598ecfbaSChristoph Hellwig void 1210598ecfbaSChristoph Hellwig iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends, 1211598ecfbaSChristoph Hellwig void (*merge_private)(struct iomap_ioend *ioend, 1212598ecfbaSChristoph Hellwig struct iomap_ioend *next)) 1213598ecfbaSChristoph Hellwig { 1214598ecfbaSChristoph Hellwig struct iomap_ioend *next; 1215598ecfbaSChristoph Hellwig 1216598ecfbaSChristoph Hellwig INIT_LIST_HEAD(&ioend->io_list); 1217598ecfbaSChristoph Hellwig 1218598ecfbaSChristoph Hellwig while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend, 1219598ecfbaSChristoph Hellwig io_list))) { 1220598ecfbaSChristoph Hellwig if (!iomap_ioend_can_merge(ioend, next)) 1221598ecfbaSChristoph Hellwig break; 1222598ecfbaSChristoph Hellwig list_move_tail(&next->io_list, &ioend->io_list); 1223598ecfbaSChristoph Hellwig ioend->io_size += next->io_size; 1224598ecfbaSChristoph Hellwig if (next->io_private && merge_private) 1225598ecfbaSChristoph Hellwig merge_private(ioend, next); 1226598ecfbaSChristoph Hellwig } 1227598ecfbaSChristoph Hellwig } 1228598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_ioend_try_merge); 1229598ecfbaSChristoph Hellwig 1230598ecfbaSChristoph Hellwig static int 1231598ecfbaSChristoph Hellwig iomap_ioend_compare(void *priv, struct list_head *a, struct list_head *b) 1232598ecfbaSChristoph Hellwig { 1233b3d423ecSChristoph Hellwig struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list); 1234b3d423ecSChristoph Hellwig struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list); 1235598ecfbaSChristoph Hellwig 1236598ecfbaSChristoph Hellwig if (ia->io_offset < ib->io_offset) 1237598ecfbaSChristoph Hellwig return -1; 1238b3d423ecSChristoph Hellwig if (ia->io_offset > ib->io_offset) 1239598ecfbaSChristoph Hellwig return 1; 1240598ecfbaSChristoph Hellwig return 0; 1241598ecfbaSChristoph Hellwig } 1242598ecfbaSChristoph Hellwig 1243598ecfbaSChristoph Hellwig void 1244598ecfbaSChristoph Hellwig iomap_sort_ioends(struct list_head *ioend_list) 1245598ecfbaSChristoph Hellwig { 1246598ecfbaSChristoph Hellwig list_sort(NULL, ioend_list, iomap_ioend_compare); 1247598ecfbaSChristoph Hellwig } 1248598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_sort_ioends); 1249598ecfbaSChristoph Hellwig 1250598ecfbaSChristoph Hellwig static void iomap_writepage_end_bio(struct bio *bio) 1251598ecfbaSChristoph Hellwig { 1252598ecfbaSChristoph Hellwig struct iomap_ioend *ioend = bio->bi_private; 1253598ecfbaSChristoph Hellwig 1254598ecfbaSChristoph Hellwig iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status)); 1255598ecfbaSChristoph Hellwig } 1256598ecfbaSChristoph Hellwig 1257598ecfbaSChristoph Hellwig /* 1258598ecfbaSChristoph Hellwig * Submit the final bio for an ioend. 1259598ecfbaSChristoph Hellwig * 1260598ecfbaSChristoph Hellwig * If @error is non-zero, it means that we have a situation where some part of 1261598ecfbaSChristoph Hellwig * the submission process has failed after we have marked paged for writeback 1262598ecfbaSChristoph Hellwig * and unlocked them. In this situation, we need to fail the bio instead of 1263598ecfbaSChristoph Hellwig * submitting it. This typically only happens on a filesystem shutdown. 1264598ecfbaSChristoph Hellwig */ 1265598ecfbaSChristoph Hellwig static int 1266598ecfbaSChristoph Hellwig iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend, 1267598ecfbaSChristoph Hellwig int error) 1268598ecfbaSChristoph Hellwig { 1269598ecfbaSChristoph Hellwig ioend->io_bio->bi_private = ioend; 1270598ecfbaSChristoph Hellwig ioend->io_bio->bi_end_io = iomap_writepage_end_bio; 1271598ecfbaSChristoph Hellwig 1272598ecfbaSChristoph Hellwig if (wpc->ops->prepare_ioend) 1273598ecfbaSChristoph Hellwig error = wpc->ops->prepare_ioend(ioend, error); 1274598ecfbaSChristoph Hellwig if (error) { 1275598ecfbaSChristoph Hellwig /* 1276598ecfbaSChristoph Hellwig * If we are failing the IO now, just mark the ioend with an 1277598ecfbaSChristoph Hellwig * error and finish it. This will run IO completion immediately 1278598ecfbaSChristoph Hellwig * as there is only one reference to the ioend at this point in 1279598ecfbaSChristoph Hellwig * time. 1280598ecfbaSChristoph Hellwig */ 1281598ecfbaSChristoph Hellwig ioend->io_bio->bi_status = errno_to_blk_status(error); 1282598ecfbaSChristoph Hellwig bio_endio(ioend->io_bio); 1283598ecfbaSChristoph Hellwig return error; 1284598ecfbaSChristoph Hellwig } 1285598ecfbaSChristoph Hellwig 1286598ecfbaSChristoph Hellwig submit_bio(ioend->io_bio); 1287598ecfbaSChristoph Hellwig return 0; 1288598ecfbaSChristoph Hellwig } 1289598ecfbaSChristoph Hellwig 1290598ecfbaSChristoph Hellwig static struct iomap_ioend * 1291598ecfbaSChristoph Hellwig iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, 1292598ecfbaSChristoph Hellwig loff_t offset, sector_t sector, struct writeback_control *wbc) 1293598ecfbaSChristoph Hellwig { 1294598ecfbaSChristoph Hellwig struct iomap_ioend *ioend; 1295598ecfbaSChristoph Hellwig struct bio *bio; 1296598ecfbaSChristoph Hellwig 1297598ecfbaSChristoph Hellwig bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset); 1298598ecfbaSChristoph Hellwig bio_set_dev(bio, wpc->iomap.bdev); 1299598ecfbaSChristoph Hellwig bio->bi_iter.bi_sector = sector; 1300598ecfbaSChristoph Hellwig bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc); 1301598ecfbaSChristoph Hellwig bio->bi_write_hint = inode->i_write_hint; 1302598ecfbaSChristoph Hellwig wbc_init_bio(wbc, bio); 1303598ecfbaSChristoph Hellwig 1304598ecfbaSChristoph Hellwig ioend = container_of(bio, struct iomap_ioend, io_inline_bio); 1305598ecfbaSChristoph Hellwig INIT_LIST_HEAD(&ioend->io_list); 1306598ecfbaSChristoph Hellwig ioend->io_type = wpc->iomap.type; 1307598ecfbaSChristoph Hellwig ioend->io_flags = wpc->iomap.flags; 1308598ecfbaSChristoph Hellwig ioend->io_inode = inode; 1309598ecfbaSChristoph Hellwig ioend->io_size = 0; 1310598ecfbaSChristoph Hellwig ioend->io_offset = offset; 1311598ecfbaSChristoph Hellwig ioend->io_private = NULL; 1312598ecfbaSChristoph Hellwig ioend->io_bio = bio; 1313598ecfbaSChristoph Hellwig return ioend; 1314598ecfbaSChristoph Hellwig } 1315598ecfbaSChristoph Hellwig 1316598ecfbaSChristoph Hellwig /* 1317598ecfbaSChristoph Hellwig * Allocate a new bio, and chain the old bio to the new one. 1318598ecfbaSChristoph Hellwig * 1319598ecfbaSChristoph Hellwig * Note that we have to do perform the chaining in this unintuitive order 1320598ecfbaSChristoph Hellwig * so that the bi_private linkage is set up in the right direction for the 1321598ecfbaSChristoph Hellwig * traversal in iomap_finish_ioend(). 1322598ecfbaSChristoph Hellwig */ 1323598ecfbaSChristoph Hellwig static struct bio * 1324598ecfbaSChristoph Hellwig iomap_chain_bio(struct bio *prev) 1325598ecfbaSChristoph Hellwig { 1326598ecfbaSChristoph Hellwig struct bio *new; 1327598ecfbaSChristoph Hellwig 1328598ecfbaSChristoph Hellwig new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES); 1329598ecfbaSChristoph Hellwig bio_copy_dev(new, prev);/* also copies over blkcg information */ 1330598ecfbaSChristoph Hellwig new->bi_iter.bi_sector = bio_end_sector(prev); 1331598ecfbaSChristoph Hellwig new->bi_opf = prev->bi_opf; 1332598ecfbaSChristoph Hellwig new->bi_write_hint = prev->bi_write_hint; 1333598ecfbaSChristoph Hellwig 1334598ecfbaSChristoph Hellwig bio_chain(prev, new); 1335598ecfbaSChristoph Hellwig bio_get(prev); /* for iomap_finish_ioend */ 1336598ecfbaSChristoph Hellwig submit_bio(prev); 1337598ecfbaSChristoph Hellwig return new; 1338598ecfbaSChristoph Hellwig } 1339598ecfbaSChristoph Hellwig 1340598ecfbaSChristoph Hellwig static bool 1341598ecfbaSChristoph Hellwig iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset, 1342598ecfbaSChristoph Hellwig sector_t sector) 1343598ecfbaSChristoph Hellwig { 1344598ecfbaSChristoph Hellwig if ((wpc->iomap.flags & IOMAP_F_SHARED) != 1345598ecfbaSChristoph Hellwig (wpc->ioend->io_flags & IOMAP_F_SHARED)) 1346598ecfbaSChristoph Hellwig return false; 1347598ecfbaSChristoph Hellwig if (wpc->iomap.type != wpc->ioend->io_type) 1348598ecfbaSChristoph Hellwig return false; 1349598ecfbaSChristoph Hellwig if (offset != wpc->ioend->io_offset + wpc->ioend->io_size) 1350598ecfbaSChristoph Hellwig return false; 1351598ecfbaSChristoph Hellwig if (sector != bio_end_sector(wpc->ioend->io_bio)) 1352598ecfbaSChristoph Hellwig return false; 1353598ecfbaSChristoph Hellwig return true; 1354598ecfbaSChristoph Hellwig } 1355598ecfbaSChristoph Hellwig 1356598ecfbaSChristoph Hellwig /* 1357598ecfbaSChristoph Hellwig * Test to see if we have an existing ioend structure that we could append to 1358598ecfbaSChristoph Hellwig * first, otherwise finish off the current ioend and start another. 1359598ecfbaSChristoph Hellwig */ 1360598ecfbaSChristoph Hellwig static void 1361598ecfbaSChristoph Hellwig iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page, 1362598ecfbaSChristoph Hellwig struct iomap_page *iop, struct iomap_writepage_ctx *wpc, 1363598ecfbaSChristoph Hellwig struct writeback_control *wbc, struct list_head *iolist) 1364598ecfbaSChristoph Hellwig { 1365598ecfbaSChristoph Hellwig sector_t sector = iomap_sector(&wpc->iomap, offset); 1366598ecfbaSChristoph Hellwig unsigned len = i_blocksize(inode); 1367598ecfbaSChristoph Hellwig unsigned poff = offset & (PAGE_SIZE - 1); 1368598ecfbaSChristoph Hellwig bool merged, same_page = false; 1369598ecfbaSChristoph Hellwig 1370598ecfbaSChristoph Hellwig if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) { 1371598ecfbaSChristoph Hellwig if (wpc->ioend) 1372598ecfbaSChristoph Hellwig list_add(&wpc->ioend->io_list, iolist); 1373598ecfbaSChristoph Hellwig wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc); 1374598ecfbaSChristoph Hellwig } 1375598ecfbaSChristoph Hellwig 1376598ecfbaSChristoph Hellwig merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff, 1377598ecfbaSChristoph Hellwig &same_page); 1378598ecfbaSChristoph Hellwig if (iop && !same_page) 1379598ecfbaSChristoph Hellwig atomic_inc(&iop->write_count); 1380598ecfbaSChristoph Hellwig 1381598ecfbaSChristoph Hellwig if (!merged) { 1382598ecfbaSChristoph Hellwig if (bio_full(wpc->ioend->io_bio, len)) { 1383598ecfbaSChristoph Hellwig wpc->ioend->io_bio = 1384598ecfbaSChristoph Hellwig iomap_chain_bio(wpc->ioend->io_bio); 1385598ecfbaSChristoph Hellwig } 1386598ecfbaSChristoph Hellwig bio_add_page(wpc->ioend->io_bio, page, len, poff); 1387598ecfbaSChristoph Hellwig } 1388598ecfbaSChristoph Hellwig 1389598ecfbaSChristoph Hellwig wpc->ioend->io_size += len; 1390598ecfbaSChristoph Hellwig wbc_account_cgroup_owner(wbc, page, len); 1391598ecfbaSChristoph Hellwig } 1392598ecfbaSChristoph Hellwig 1393598ecfbaSChristoph Hellwig /* 1394598ecfbaSChristoph Hellwig * We implement an immediate ioend submission policy here to avoid needing to 1395598ecfbaSChristoph Hellwig * chain multiple ioends and hence nest mempool allocations which can violate 1396598ecfbaSChristoph Hellwig * forward progress guarantees we need to provide. The current ioend we are 1397598ecfbaSChristoph Hellwig * adding blocks to is cached on the writepage context, and if the new block 1398598ecfbaSChristoph Hellwig * does not append to the cached ioend it will create a new ioend and cache that 1399598ecfbaSChristoph Hellwig * instead. 1400598ecfbaSChristoph Hellwig * 1401598ecfbaSChristoph Hellwig * If a new ioend is created and cached, the old ioend is returned and queued 1402598ecfbaSChristoph Hellwig * locally for submission once the entire page is processed or an error has been 1403598ecfbaSChristoph Hellwig * detected. While ioends are submitted immediately after they are completed, 1404598ecfbaSChristoph Hellwig * batching optimisations are provided by higher level block plugging. 1405598ecfbaSChristoph Hellwig * 1406598ecfbaSChristoph Hellwig * At the end of a writeback pass, there will be a cached ioend remaining on the 1407598ecfbaSChristoph Hellwig * writepage context that the caller will need to submit. 1408598ecfbaSChristoph Hellwig */ 1409598ecfbaSChristoph Hellwig static int 1410598ecfbaSChristoph Hellwig iomap_writepage_map(struct iomap_writepage_ctx *wpc, 1411598ecfbaSChristoph Hellwig struct writeback_control *wbc, struct inode *inode, 1412598ecfbaSChristoph Hellwig struct page *page, u64 end_offset) 1413598ecfbaSChristoph Hellwig { 1414598ecfbaSChristoph Hellwig struct iomap_page *iop = to_iomap_page(page); 1415598ecfbaSChristoph Hellwig struct iomap_ioend *ioend, *next; 1416598ecfbaSChristoph Hellwig unsigned len = i_blocksize(inode); 1417598ecfbaSChristoph Hellwig u64 file_offset; /* file offset of page */ 1418598ecfbaSChristoph Hellwig int error = 0, count = 0, i; 1419598ecfbaSChristoph Hellwig LIST_HEAD(submit_list); 1420598ecfbaSChristoph Hellwig 1421598ecfbaSChristoph Hellwig WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop); 1422598ecfbaSChristoph Hellwig WARN_ON_ONCE(iop && atomic_read(&iop->write_count) != 0); 1423598ecfbaSChristoph Hellwig 1424598ecfbaSChristoph Hellwig /* 1425598ecfbaSChristoph Hellwig * Walk through the page to find areas to write back. If we run off the 1426598ecfbaSChristoph Hellwig * end of the current map or find the current map invalid, grab a new 1427598ecfbaSChristoph Hellwig * one. 1428598ecfbaSChristoph Hellwig */ 1429598ecfbaSChristoph Hellwig for (i = 0, file_offset = page_offset(page); 1430598ecfbaSChristoph Hellwig i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset; 1431598ecfbaSChristoph Hellwig i++, file_offset += len) { 1432598ecfbaSChristoph Hellwig if (iop && !test_bit(i, iop->uptodate)) 1433598ecfbaSChristoph Hellwig continue; 1434598ecfbaSChristoph Hellwig 1435598ecfbaSChristoph Hellwig error = wpc->ops->map_blocks(wpc, inode, file_offset); 1436598ecfbaSChristoph Hellwig if (error) 1437598ecfbaSChristoph Hellwig break; 14383e19e6f3SChristoph Hellwig if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE)) 14393e19e6f3SChristoph Hellwig continue; 1440598ecfbaSChristoph Hellwig if (wpc->iomap.type == IOMAP_HOLE) 1441598ecfbaSChristoph Hellwig continue; 1442598ecfbaSChristoph Hellwig iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc, 1443598ecfbaSChristoph Hellwig &submit_list); 1444598ecfbaSChristoph Hellwig count++; 1445598ecfbaSChristoph Hellwig } 1446598ecfbaSChristoph Hellwig 1447598ecfbaSChristoph Hellwig WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list)); 1448598ecfbaSChristoph Hellwig WARN_ON_ONCE(!PageLocked(page)); 1449598ecfbaSChristoph Hellwig WARN_ON_ONCE(PageWriteback(page)); 1450598ecfbaSChristoph Hellwig 1451598ecfbaSChristoph Hellwig /* 1452598ecfbaSChristoph Hellwig * We cannot cancel the ioend directly here on error. We may have 1453598ecfbaSChristoph Hellwig * already set other pages under writeback and hence we have to run I/O 1454598ecfbaSChristoph Hellwig * completion to mark the error state of the pages under writeback 1455598ecfbaSChristoph Hellwig * appropriately. 1456598ecfbaSChristoph Hellwig */ 1457598ecfbaSChristoph Hellwig if (unlikely(error)) { 1458598ecfbaSChristoph Hellwig if (!count) { 1459598ecfbaSChristoph Hellwig /* 1460598ecfbaSChristoph Hellwig * If the current page hasn't been added to ioend, it 1461598ecfbaSChristoph Hellwig * won't be affected by I/O completions and we must 1462598ecfbaSChristoph Hellwig * discard and unlock it right here. 1463598ecfbaSChristoph Hellwig */ 1464598ecfbaSChristoph Hellwig if (wpc->ops->discard_page) 1465598ecfbaSChristoph Hellwig wpc->ops->discard_page(page); 1466598ecfbaSChristoph Hellwig ClearPageUptodate(page); 1467598ecfbaSChristoph Hellwig unlock_page(page); 1468598ecfbaSChristoph Hellwig goto done; 1469598ecfbaSChristoph Hellwig } 1470598ecfbaSChristoph Hellwig 1471598ecfbaSChristoph Hellwig /* 1472598ecfbaSChristoph Hellwig * If the page was not fully cleaned, we need to ensure that the 1473598ecfbaSChristoph Hellwig * higher layers come back to it correctly. That means we need 1474598ecfbaSChristoph Hellwig * to keep the page dirty, and for WB_SYNC_ALL writeback we need 1475598ecfbaSChristoph Hellwig * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed 1476598ecfbaSChristoph Hellwig * so another attempt to write this page in this writeback sweep 1477598ecfbaSChristoph Hellwig * will be made. 1478598ecfbaSChristoph Hellwig */ 1479598ecfbaSChristoph Hellwig set_page_writeback_keepwrite(page); 1480598ecfbaSChristoph Hellwig } else { 1481598ecfbaSChristoph Hellwig clear_page_dirty_for_io(page); 1482598ecfbaSChristoph Hellwig set_page_writeback(page); 1483598ecfbaSChristoph Hellwig } 1484598ecfbaSChristoph Hellwig 1485598ecfbaSChristoph Hellwig unlock_page(page); 1486598ecfbaSChristoph Hellwig 1487598ecfbaSChristoph Hellwig /* 1488598ecfbaSChristoph Hellwig * Preserve the original error if there was one, otherwise catch 1489598ecfbaSChristoph Hellwig * submission errors here and propagate into subsequent ioend 1490598ecfbaSChristoph Hellwig * submissions. 1491598ecfbaSChristoph Hellwig */ 1492598ecfbaSChristoph Hellwig list_for_each_entry_safe(ioend, next, &submit_list, io_list) { 1493598ecfbaSChristoph Hellwig int error2; 1494598ecfbaSChristoph Hellwig 1495598ecfbaSChristoph Hellwig list_del_init(&ioend->io_list); 1496598ecfbaSChristoph Hellwig error2 = iomap_submit_ioend(wpc, ioend, error); 1497598ecfbaSChristoph Hellwig if (error2 && !error) 1498598ecfbaSChristoph Hellwig error = error2; 1499598ecfbaSChristoph Hellwig } 1500598ecfbaSChristoph Hellwig 1501598ecfbaSChristoph Hellwig /* 1502598ecfbaSChristoph Hellwig * We can end up here with no error and nothing to write only if we race 1503598ecfbaSChristoph Hellwig * with a partial page truncate on a sub-page block sized filesystem. 1504598ecfbaSChristoph Hellwig */ 1505598ecfbaSChristoph Hellwig if (!count) 1506598ecfbaSChristoph Hellwig end_page_writeback(page); 1507598ecfbaSChristoph Hellwig done: 1508598ecfbaSChristoph Hellwig mapping_set_error(page->mapping, error); 1509598ecfbaSChristoph Hellwig return error; 1510598ecfbaSChristoph Hellwig } 1511598ecfbaSChristoph Hellwig 1512598ecfbaSChristoph Hellwig /* 1513598ecfbaSChristoph Hellwig * Write out a dirty page. 1514598ecfbaSChristoph Hellwig * 1515598ecfbaSChristoph Hellwig * For delalloc space on the page we need to allocate space and flush it. 1516598ecfbaSChristoph Hellwig * For unwritten space on the page we need to start the conversion to 1517598ecfbaSChristoph Hellwig * regular allocated space. 1518598ecfbaSChristoph Hellwig */ 1519598ecfbaSChristoph Hellwig static int 1520598ecfbaSChristoph Hellwig iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data) 1521598ecfbaSChristoph Hellwig { 1522598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc = data; 1523598ecfbaSChristoph Hellwig struct inode *inode = page->mapping->host; 1524598ecfbaSChristoph Hellwig pgoff_t end_index; 1525598ecfbaSChristoph Hellwig u64 end_offset; 1526598ecfbaSChristoph Hellwig loff_t offset; 1527598ecfbaSChristoph Hellwig 1528598ecfbaSChristoph Hellwig trace_iomap_writepage(inode, page, 0, 0); 1529598ecfbaSChristoph Hellwig 1530598ecfbaSChristoph Hellwig /* 1531598ecfbaSChristoph Hellwig * Refuse to write the page out if we are called from reclaim context. 1532598ecfbaSChristoph Hellwig * 1533598ecfbaSChristoph Hellwig * This avoids stack overflows when called from deeply used stacks in 1534598ecfbaSChristoph Hellwig * random callers for direct reclaim or memcg reclaim. We explicitly 1535598ecfbaSChristoph Hellwig * allow reclaim from kswapd as the stack usage there is relatively low. 1536598ecfbaSChristoph Hellwig * 1537598ecfbaSChristoph Hellwig * This should never happen except in the case of a VM regression so 1538598ecfbaSChristoph Hellwig * warn about it. 1539598ecfbaSChristoph Hellwig */ 1540598ecfbaSChristoph Hellwig if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == 1541598ecfbaSChristoph Hellwig PF_MEMALLOC)) 1542598ecfbaSChristoph Hellwig goto redirty; 1543598ecfbaSChristoph Hellwig 1544598ecfbaSChristoph Hellwig /* 1545598ecfbaSChristoph Hellwig * Given that we do not allow direct reclaim to call us, we should 1546598ecfbaSChristoph Hellwig * never be called in a recursive filesystem reclaim context. 1547598ecfbaSChristoph Hellwig */ 1548598ecfbaSChristoph Hellwig if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS)) 1549598ecfbaSChristoph Hellwig goto redirty; 1550598ecfbaSChristoph Hellwig 1551598ecfbaSChristoph Hellwig /* 1552598ecfbaSChristoph Hellwig * Is this page beyond the end of the file? 1553598ecfbaSChristoph Hellwig * 1554598ecfbaSChristoph Hellwig * The page index is less than the end_index, adjust the end_offset 1555598ecfbaSChristoph Hellwig * to the highest offset that this page should represent. 1556598ecfbaSChristoph Hellwig * ----------------------------------------------------- 1557598ecfbaSChristoph Hellwig * | file mapping | <EOF> | 1558598ecfbaSChristoph Hellwig * ----------------------------------------------------- 1559598ecfbaSChristoph Hellwig * | Page ... | Page N-2 | Page N-1 | Page N | | 1560598ecfbaSChristoph Hellwig * ^--------------------------------^----------|-------- 1561598ecfbaSChristoph Hellwig * | desired writeback range | see else | 1562598ecfbaSChristoph Hellwig * ---------------------------------^------------------| 1563598ecfbaSChristoph Hellwig */ 1564598ecfbaSChristoph Hellwig offset = i_size_read(inode); 1565598ecfbaSChristoph Hellwig end_index = offset >> PAGE_SHIFT; 1566598ecfbaSChristoph Hellwig if (page->index < end_index) 1567598ecfbaSChristoph Hellwig end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT; 1568598ecfbaSChristoph Hellwig else { 1569598ecfbaSChristoph Hellwig /* 1570598ecfbaSChristoph Hellwig * Check whether the page to write out is beyond or straddles 1571598ecfbaSChristoph Hellwig * i_size or not. 1572598ecfbaSChristoph Hellwig * ------------------------------------------------------- 1573598ecfbaSChristoph Hellwig * | file mapping | <EOF> | 1574598ecfbaSChristoph Hellwig * ------------------------------------------------------- 1575598ecfbaSChristoph Hellwig * | Page ... | Page N-2 | Page N-1 | Page N | Beyond | 1576598ecfbaSChristoph Hellwig * ^--------------------------------^-----------|--------- 1577598ecfbaSChristoph Hellwig * | | Straddles | 1578598ecfbaSChristoph Hellwig * ---------------------------------^-----------|--------| 1579598ecfbaSChristoph Hellwig */ 1580598ecfbaSChristoph Hellwig unsigned offset_into_page = offset & (PAGE_SIZE - 1); 1581598ecfbaSChristoph Hellwig 1582598ecfbaSChristoph Hellwig /* 1583598ecfbaSChristoph Hellwig * Skip the page if it is fully outside i_size, e.g. due to a 1584598ecfbaSChristoph Hellwig * truncate operation that is in progress. We must redirty the 1585598ecfbaSChristoph Hellwig * page so that reclaim stops reclaiming it. Otherwise 1586598ecfbaSChristoph Hellwig * iomap_vm_releasepage() is called on it and gets confused. 1587598ecfbaSChristoph Hellwig * 1588598ecfbaSChristoph Hellwig * Note that the end_index is unsigned long, it would overflow 1589598ecfbaSChristoph Hellwig * if the given offset is greater than 16TB on 32-bit system 1590598ecfbaSChristoph Hellwig * and if we do check the page is fully outside i_size or not 1591598ecfbaSChristoph Hellwig * via "if (page->index >= end_index + 1)" as "end_index + 1" 1592598ecfbaSChristoph Hellwig * will be evaluated to 0. Hence this page will be redirtied 1593598ecfbaSChristoph Hellwig * and be written out repeatedly which would result in an 1594598ecfbaSChristoph Hellwig * infinite loop, the user program that perform this operation 1595598ecfbaSChristoph Hellwig * will hang. Instead, we can verify this situation by checking 1596598ecfbaSChristoph Hellwig * if the page to write is totally beyond the i_size or if it's 1597598ecfbaSChristoph Hellwig * offset is just equal to the EOF. 1598598ecfbaSChristoph Hellwig */ 1599598ecfbaSChristoph Hellwig if (page->index > end_index || 1600598ecfbaSChristoph Hellwig (page->index == end_index && offset_into_page == 0)) 1601598ecfbaSChristoph Hellwig goto redirty; 1602598ecfbaSChristoph Hellwig 1603598ecfbaSChristoph Hellwig /* 1604598ecfbaSChristoph Hellwig * The page straddles i_size. It must be zeroed out on each 1605598ecfbaSChristoph Hellwig * and every writepage invocation because it may be mmapped. 1606598ecfbaSChristoph Hellwig * "A file is mapped in multiples of the page size. For a file 1607598ecfbaSChristoph Hellwig * that is not a multiple of the page size, the remaining 1608598ecfbaSChristoph Hellwig * memory is zeroed when mapped, and writes to that region are 1609598ecfbaSChristoph Hellwig * not written out to the file." 1610598ecfbaSChristoph Hellwig */ 1611598ecfbaSChristoph Hellwig zero_user_segment(page, offset_into_page, PAGE_SIZE); 1612598ecfbaSChristoph Hellwig 1613598ecfbaSChristoph Hellwig /* Adjust the end_offset to the end of file */ 1614598ecfbaSChristoph Hellwig end_offset = offset; 1615598ecfbaSChristoph Hellwig } 1616598ecfbaSChristoph Hellwig 1617598ecfbaSChristoph Hellwig return iomap_writepage_map(wpc, wbc, inode, page, end_offset); 1618598ecfbaSChristoph Hellwig 1619598ecfbaSChristoph Hellwig redirty: 1620598ecfbaSChristoph Hellwig redirty_page_for_writepage(wbc, page); 1621598ecfbaSChristoph Hellwig unlock_page(page); 1622598ecfbaSChristoph Hellwig return 0; 1623598ecfbaSChristoph Hellwig } 1624598ecfbaSChristoph Hellwig 1625598ecfbaSChristoph Hellwig int 1626598ecfbaSChristoph Hellwig iomap_writepage(struct page *page, struct writeback_control *wbc, 1627598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc, 1628598ecfbaSChristoph Hellwig const struct iomap_writeback_ops *ops) 1629598ecfbaSChristoph Hellwig { 1630598ecfbaSChristoph Hellwig int ret; 1631598ecfbaSChristoph Hellwig 1632598ecfbaSChristoph Hellwig wpc->ops = ops; 1633598ecfbaSChristoph Hellwig ret = iomap_do_writepage(page, wbc, wpc); 1634598ecfbaSChristoph Hellwig if (!wpc->ioend) 1635598ecfbaSChristoph Hellwig return ret; 1636598ecfbaSChristoph Hellwig return iomap_submit_ioend(wpc, wpc->ioend, ret); 1637598ecfbaSChristoph Hellwig } 1638598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_writepage); 1639598ecfbaSChristoph Hellwig 1640598ecfbaSChristoph Hellwig int 1641598ecfbaSChristoph Hellwig iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, 1642598ecfbaSChristoph Hellwig struct iomap_writepage_ctx *wpc, 1643598ecfbaSChristoph Hellwig const struct iomap_writeback_ops *ops) 1644598ecfbaSChristoph Hellwig { 1645598ecfbaSChristoph Hellwig int ret; 1646598ecfbaSChristoph Hellwig 1647598ecfbaSChristoph Hellwig wpc->ops = ops; 1648598ecfbaSChristoph Hellwig ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc); 1649598ecfbaSChristoph Hellwig if (!wpc->ioend) 1650598ecfbaSChristoph Hellwig return ret; 1651598ecfbaSChristoph Hellwig return iomap_submit_ioend(wpc, wpc->ioend, ret); 1652598ecfbaSChristoph Hellwig } 1653598ecfbaSChristoph Hellwig EXPORT_SYMBOL_GPL(iomap_writepages); 1654598ecfbaSChristoph Hellwig 1655598ecfbaSChristoph Hellwig static int __init iomap_init(void) 1656598ecfbaSChristoph Hellwig { 1657598ecfbaSChristoph Hellwig return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE), 1658598ecfbaSChristoph Hellwig offsetof(struct iomap_ioend, io_inline_bio), 1659598ecfbaSChristoph Hellwig BIOSET_NEED_BVECS); 1660598ecfbaSChristoph Hellwig } 1661598ecfbaSChristoph Hellwig fs_initcall(iomap_init); 1662