// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>

#include "../internal.h"

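/*
 * Attach the per-page state used to track sub-page uptodate status and
 * pending read/write I/O counts when the block size is smaller than the
 * page size.  When block size == PAGE_SIZE no such state is needed and
 * NULL is returned.
 */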
static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (iop || i_blocksize(inode) == PAGE_SIZE)
		return iop;

	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&iop->read_count, 0);
	atomic_set(&iop->write_count, 0);
	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);

	/*
	 * migrate_page_move_mapping() assumes that pages with private data have
	 * their count elevated by 1.
	 */
	get_page(page);
	set_page_private(page, (unsigned long)iop);
	SetPagePrivate(page);
	return iop;
}

static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_count));
	WARN_ON_ONCE(atomic_read(&iop->write_count));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(iop);
}

/*
 * Calculate the range inside the page that we actually need to read.
 */
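/*
 * For example (illustrative numbers): on a 4096-byte page made up of four
 * 1024-byte blocks where blocks 0 and 3 are already uptodate, a request
 * covering the whole page is trimmed to blocks 1 and 2: *pos advances by
 * 1024 and the returned length shrinks to 2048.
 */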
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned int i;
	bool uptodate = true;

	if (iop) {
		for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
			if (i >= first && i <= last)
				set_bit(i, iop->uptodate);
			else if (!test_bit(i, iop->uptodate))
				uptodate = false;
		}
	}

	if (uptodate && !PageError(page))
		SetPageUptodate(page);
}

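/*
 * iop->read_count tracks the number of outstanding read segments targeting
 * this page; the page is unlocked once the last segment completes, or
 * immediately when there is no iomap_page attached.
 */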
static void
iomap_read_finish(struct iomap_page *iop, struct page *page)
{
	if (!iop || atomic_dec_and_test(&iop->read_count))
		unlock_page(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	iomap_read_finish(iop, page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

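/*
 * State shared by the readpage/readpages actors: the page currently being
 * filled, whether it has been added to the in-flight bio (and so will be
 * unlocked by I/O completion rather than by us), and the list of
 * not-yet-started pages during readahead.
 */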
struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	bool			is_readahead;
	struct bio		*bio;
	struct list_head	*pages;
};

static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}

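/*
 * A block is zeroed in the page cache rather than read from disk if the
 * extent is anything other than a plain mapped extent (holes, delalloc,
 * unwritten extents), is freshly allocated (IOMAP_F_NEW), or starts at or
 * beyond EOF.
 */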
static inline bool iomap_block_needs_zeroing(struct inode *inode,
		struct iomap *iomap, loff_t pos)
{
	return iomap->type != IOMAP_MAPPED ||
		(iomap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(inode);
}

static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool same_page = false, is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(inode, iomap, pos)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;

	/*
	 * Try to merge into a previous segment if we can.
	 */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector)
		is_contig = true;

	if (is_contig &&
	    __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
		if (!same_page && iop)
			atomic_inc(&iop->read_count);
		goto done;
	}

	/*
	 * If we start a new segment we need to increase the read count, and we
	 * need to do so before submitting any previous full bio to make sure
	 * that we don't prematurely unlock the page.
	 */
	if (iop)
		atomic_inc(&iop->read_count);

	if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->is_readahead) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->is_readahead)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

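/*
 * Example (illustrative; the myfs_* names are placeholders): a filesystem
 * built on iomap typically wires this up through its
 * address_space_operations:
 *
 *	static int myfs_readpage(struct file *unused, struct page *page)
 *	{
 *		return iomap_readpage(page, &myfs_iomap_ops);
 *	}
 */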
int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readpages and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);

static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
		loff_t length, loff_t *done)
{
	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		if (page_offset(page) >= (u64)pos + length)
			break;

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
				GFP_NOFS))
			return page;

		/*
		 * If we already have a page in the page cache at index we are
		 * done.  Upper layers don't care if it is uptodate after the
		 * readpages call itself as every page gets checked again once
		 * actually needed.
		 */
		*done += PAGE_SIZE;
		put_page(page);
	}

	return NULL;
}

static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = iomap_next_page(inode, ctx->pages,
					pos, length, &done);
			if (!ctx->cur_page)
				break;
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap);
	}

	return done;
}

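/*
 * Example (illustrative; myfs_* names are placeholders): the readahead
 * counterpart is hooked up the same way:
 *
 *	static int myfs_readpages(struct file *unused,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return iomap_readpages(mapping, pages, nr_pages,
 *				&myfs_iomap_ops);
 *	}
 */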
int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = {
		.pages		= pages,
		.is_readahead	= true,
	};
	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
	loff_t length = last - pos + PAGE_SIZE, ret = 0;

	while (length > 0) {
		ret = iomap_apply(mapping->host, pos, length, 0, ops,
				&ctx, iomap_readpages_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			goto done;
		}
		pos += ret;
		length -= ret;
	}
	ret = 0;
done:
	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}

	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
	WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_readpages);

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page)) {
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

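/*
 * Bring a sub-page block range uptodate ahead of a partial write: either
 * zero the parts of the range outside [from, to) when the block needs
 * zeroing anyway, or read the range in synchronously.
 */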
static int
iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
		unsigned poff, unsigned plen, unsigned from, unsigned to,
		struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	if (iomap_block_needs_zeroing(inode, iomap, block_start)) {
		zero_user_segments(page, poff, from, to, poff + plen);
		iomap_set_range_uptodate(page, poff, plen);
		return 0;
	}

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

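/*
 * Walk the blocks covered by a write and read (or zero) any block that the
 * write will only partially overwrite, so that no stale data survives in
 * the non-overwritten parts of the page.
 */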
static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
		struct page *page, struct iomap *iomap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = pos & ~(block_size - 1);
	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
	int status = 0;

	if (PageUptodate(page))
		return 0;

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if ((from > poff && from < poff + plen) ||
		    (to > poff && to < poff + plen)) {
			status = iomap_read_page_sync(inode, block_start, page,
					poff, plen, from, to, iomap);
			if (status)
				break;
		}

	} while ((block_start += plen) < block_end);

	return status;
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(inode, pos, len, iomap);
		if (status)
			return status;
	}

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page) {
		status = -ENOMEM;
		goto out_no_page;
	}

	if (iomap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, iomap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, iomap);
	else
		status = __iomap_write_begin(inode, pos, len, page, iomap);

	if (unlikely(status))
		goto out_unlock;

	*pagep = page;
	return 0;

out_unlock:
	unlock_page(page);
	put_page(page);
	iomap_write_failed(inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, 0, NULL, iomap);
	return status;
}

int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);

static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However, if we have encountered a short write and
	 * only partially written into a block, it will not be marked uptodate,
	 * so a readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page)))
		return 0;
	iomap_set_range_uptodate(page, offset_in_page(pos), len);
	iomap_set_page_dirty(page);
	return copied;
}

static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	return copied;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	loff_t old_size = inode->i_size;
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
				page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(inode, pos + ret);
		iomap->flags |= IOMAP_F_SIZE_CHANGED;
	}
	unlock_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, ret, page, iomap);
	put_page(page);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page,
				iomap);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

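/*
 * Example (illustrative; myfs_* names are placeholders): a ->write_iter
 * implementation would typically call this with the inode locked and then
 * handle syncing itself, along the lines of:
 *
 *	inode_lock(inode);
 *	ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
 *	inode_unlock(inode);
 *	if (ret > 0)
 *		ret = generic_write_sync(iocb, ret);
 */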
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

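/*
 * Zero a sub-page range through the page cache using the normal buffered
 * write path, so the zeroed blocks are marked uptodate and dirtied like
 * any other buffered write.
 */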
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

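/*
 * Zero from @pos to the end of the block containing it.  Typically used
 * on the new EOF block during a truncate so that stale data beyond the
 * new size is not exposed.
 */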
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		iomap_page_create(inode, page);
		set_page_dirty(page);
	}

	return length;
}

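/*
 * Example (illustrative; myfs_* names are placeholders): hooked up through
 * a filesystem's vm_operations_struct, e.g.:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *	}
 */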
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = offset_in_page(size);
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
1081afc51aaaSDarrick J. Wong EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
1082