// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (iop || i_blocksize(inode) == PAGE_SIZE)
		return iop;

	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&iop->read_count, 0);
	atomic_set(&iop->write_count, 0);
	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);

	/*
	 * migrate_page_move_mapping() assumes that pages with private data have
	 * their count elevated by 1.
	 */
	get_page(page);
	set_page_private(page, (unsigned long)iop);
	SetPagePrivate(page);
	return iop;
}

static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_count));
	WARN_ON_ONCE(atomic_read(&iop->write_count));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(iop);
}
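
/*
 * For reference, the structure stashed in page->private above is declared
 * in include/linux/iomap.h and at this point looks roughly like:
 *
 *	struct iomap_page {
 *		atomic_t		read_count;
 *		atomic_t		write_count;
 *		DECLARE_BITMAP(uptodate, PAGE_SIZE / SECTOR_SIZE);
 *	};
 *
 * read_count and write_count track bios in flight against the page, and
 * the uptodate bitmap records which sub-page blocks are valid once
 * i_blocksize() is smaller than PAGE_SIZE.
 */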

/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}
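
/*
 * A short worked example of the adjustment above: assume a 4096-byte page
 * with 1024-byte blocks where blocks 0 and 3 are already uptodate, and a
 * read of the whole page (*pos at the page start, length = 4096).  The
 * first loop skips the leading uptodate block, advancing *pos and poff to
 * 1024 and shrinking plen to 3072; the second loop trims the trailing
 * uptodate block, leaving plen = 2048.  Only blocks 1 and 2 get read.
 */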

static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned int i;
	bool uptodate = true;

	if (iop) {
		for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
			if (i >= first && i <= last)
				set_bit(i, iop->uptodate);
			else if (!test_bit(i, iop->uptodate))
				uptodate = false;
		}
	}

	if (uptodate && !PageError(page))
		SetPageUptodate(page);
}

static void
iomap_read_finish(struct iomap_page *iop, struct page *page)
{
	if (!iop || atomic_dec_and_test(&iop->read_count))
		unlock_page(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	iomap_read_finish(iop, page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	bool			is_readahead;
	struct bio		*bio;
	struct list_head	*pages;
};

static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}

static inline bool iomap_block_needs_zeroing(struct inode *inode,
		struct iomap *iomap, loff_t pos)
{
	return iomap->type != IOMAP_MAPPED ||
		(iomap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(inode);
}

static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool same_page = false, is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(inode, iomap, pos)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;

	/*
	 * Try to merge into a previous segment if we can.
	 */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector)
		is_contig = true;

	if (is_contig &&
	    __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
		if (!same_page && iop)
			atomic_inc(&iop->read_count);
		goto done;
	}

	/*
	 * If we start a new segment we need to increase the read count, and we
	 * need to do so before submitting any previous full bio to make sure
	 * that we don't prematurely unlock the page.
	 */
	if (iop)
		atomic_inc(&iop->read_count);

	if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->is_readahead) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->is_readahead)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	trace_iomap_readpage(page->mapping->host, 1);

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readpages and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);

static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
		loff_t length, loff_t *done)
{
	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		if (page_offset(page) >= (u64)pos + length)
			break;

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
				GFP_NOFS))
			return page;

		/*
		 * If we already have a page in the page cache at index we are
		 * done.  Upper layers don't care if it is uptodate after the
		 * readpages call itself as every page gets checked again once
		 * actually needed.
		 */
		*done += PAGE_SIZE;
		put_page(page);
	}

	return NULL;
}

static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = iomap_next_page(inode, ctx->pages,
					pos, length, &done);
			if (!ctx->cur_page)
				break;
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap);
	}

	return done;
}

int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = {
		.pages		= pages,
		.is_readahead	= true,
	};
	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
	loff_t length = last - pos + PAGE_SIZE, ret = 0;

	trace_iomap_readpages(mapping->host, nr_pages);

	while (length > 0) {
		ret = iomap_apply(mapping->host, pos, length, 0, ops,
				&ctx, iomap_readpages_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			goto done;
		}
		pos += ret;
		length -= ret;
	}
	ret = 0;
done:
	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}

	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
	WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_readpages);
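
/*
 * A minimal sketch of how a filesystem wires up the read side, modelled
 * on xfs; the "myfs" names and myfs_iomap_ops are assumptions for
 * illustration, not part of this file:
 *
 *	static int myfs_readpage(struct file *unused, struct page *page)
 *	{
 *		return iomap_readpage(page, &myfs_iomap_ops);
 *	}
 *
 *	static int myfs_readpages(struct file *unused,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return iomap_readpages(mapping, pages, nr_pages,
 *				&myfs_iomap_ops);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage	= myfs_readpage,
 *		.readpages	= myfs_readpages,
 *		.releasepage	= iomap_releasepage,
 *		.invalidatepage	= iomap_invalidatepage,
 *		...
 *	};
 */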

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	trace_iomap_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have
	 * had the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	trace_iomap_invalidatepage(page->mapping->host, page, offset, len);

	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page)) {
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}
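
/*
 * What follows is the buffered write path.  Before data can be copied into
 * a page that is not fully uptodate, every block that the write only
 * partially covers has to be read in first (or zeroed if it is newly
 * allocated or beyond EOF): the classic read-modify-write cycle.  For
 * example, with 4096-byte blocks, a write of bytes 100-199 of an existing
 * block makes __iomap_write_begin() below read the whole block
 * synchronously before the copy, while a write covering bytes 0-4095
 * touches every byte of the block and needs no read at all.
 */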

static int
iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
		unsigned poff, unsigned plen, unsigned from, unsigned to,
		struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	if (iomap_block_needs_zeroing(inode, iomap, block_start)) {
		zero_user_segments(page, poff, from, to, poff + plen);
		iomap_set_range_uptodate(page, poff, plen);
		return 0;
	}

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
		struct page *page, struct iomap *iomap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = pos & ~(block_size - 1);
	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
	int status = 0;

	if (PageUptodate(page))
		return 0;

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if ((from > poff && from < poff + plen) ||
		    (to > poff && to < poff + plen)) {
			status = iomap_read_page_sync(inode, block_start, page,
					poff, plen, from, to, iomap);
			if (status)
				break;
		}

	} while ((block_start += plen) < block_end);

	return status;
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(inode, pos, len, iomap);
		if (status)
			return status;
	}

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page) {
		status = -ENOMEM;
		goto out_no_page;
	}

	if (iomap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, iomap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, iomap);
	else
		status = __iomap_write_begin(inode, pos, len, page, iomap);

	if (unlikely(status))
		goto out_unlock;

	*pagep = page;
	return 0;

out_unlock:
	unlock_page(page);
	put_page(page);
	iomap_write_failed(inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, 0, NULL, iomap);
	return status;
}

int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);

static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However if we have encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing, and just treat any short write to a non
	 * uptodate page as a zero-length write, and force the caller to redo
	 * the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page)))
		return 0;
	iomap_set_range_uptodate(page, offset_in_page(pos), len);
	iomap_set_page_dirty(page);
	return copied;
}

static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	return copied;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	loff_t old_size = inode->i_size;
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
				page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(inode, pos + ret);
		iomap->flags |= IOMAP_F_SIZE_CHANGED;
	}
	unlock_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, ret, page, iomap);
	put_page(page);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page,
				iomap);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
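
/*
 * A minimal sketch of a caller, with hypothetical "myfs" names; a real
 * filesystem (e.g. xfs) also handles O_DIRECT and takes its own locks
 * before getting here:
 *
 *	static ssize_t myfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0) {
 *			iocb->ki_pos += ret;
 *			ret = generic_write_sync(iocb, ret);
 *		}
 *		return ret;
 *	}
 *
 * Note that iomap_file_buffered_write() does not update iocb->ki_pos
 * itself; that is left to the caller.
 */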

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
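
/*
 * For example, truncating a file with 4096-byte blocks down to size 6000
 * leaves pos 6000 in the middle of the file's second block: off is 1904,
 * so the call above zeroes the remaining 2192 bytes of that block.  A
 * truncate to a block-aligned size returns early and zeroes nothing.
 */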

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		iomap_page_create(inode, page);
		set_page_dirty(page);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = offset_in_page(size);
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
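
/*
 * A minimal sketch of wiring up the fault path, again with hypothetical
 * "myfs" names; real filesystems typically also bracket this with
 * sb_start_pagefault()/sb_end_pagefault() for freeze protection:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *	}
 *
 *	static const struct vm_operations_struct myfs_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */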