// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2021 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
#include "trace.h"

#include "../internal.h"

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_CALLER_COMP	(1U << 26)
#define IOMAP_DIO_INLINE_COMP	(1U << 27)
#define IOMAP_DIO_WRITE_THROUGH	(1U << 28)
#define IOMAP_DIO_NEED_SYNC	(1U << 29)
#define IOMAP_DIO_WRITE		(1U << 30)
#define IOMAP_DIO_DIRTY		(1U << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	size_t			done_before;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
{
	if (dio->dops && dio->dops->bio_set)
		return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,
					GFP_KERNEL, dio->dops->bio_set);
	return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);
}

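/*
 * Illustrative sketch (not part of this file): a filesystem that wants
 * per-bio state can point dops->bio_set at its own bio_set, so the bios
 * allocated above come embedded in a larger structure via front_pad.
 * The names below ("my_dio_bio", "my_bioset", "my_dio_ops") are made up;
 * only the iomap_dio_ops and bioset_init() APIs themselves are real.
 *
 *	struct my_dio_bio {
 *		void		*state;
 *		struct bio	bio;	// must be last, matches front_pad
 *	};
 *
 *	static struct bio_set my_bioset;
 *	// bioset_init(&my_bioset, BIO_POOL_SIZE,
 *	//	       offsetof(struct my_dio_bio, bio), BIOSET_NEED_BVECS);
 *
 *	static const struct iomap_dio_ops my_dio_ops = {
 *		.bio_set	= &my_bioset,
 *	};
 */
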
static void iomap_dio_submit_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, struct bio *bio, loff_t pos)
{
	struct kiocb *iocb = dio->iocb;

	atomic_inc(&dio->ref);

	/* Sync dio can't be polled reliably */
	if ((iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(iocb)) {
		bio_set_polled(bio, iocb);
		WRITE_ONCE(iocb->private, bio);
	}

	if (dio->dops && dio->dops->submit_io)
		dio->dops->submit_io(iter, bio, pos);
	else
		submit_bio(bio);
}

ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're
	 * writing. Either one is a pretty crazy thing to do, so we don't
	 * support it 100%. If this invalidation fails, tough, the write still
	 * worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffer read would
	 * cache zeros from unwritten extents.
	 */
	if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE))
		kiocb_invalidate_post_direct_write(iocb, dio->size);

	inode_dio_end(file_inode(iocb->ki_filp));

	if (ret > 0) {
		iocb->ki_pos += ret;

		/*
		 * If this is a DSYNC write, make sure we push it to stable
		 * storage now that we've written data.
		 */
		if (dio->flags & IOMAP_DIO_NEED_SYNC)
			ret = generic_write_sync(iocb, ret);
		if (ret > 0)
			ret += dio->done_before;
	}
	trace_iomap_dio_complete(iocb, dio->error, ret);
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);

static ssize_t iomap_dio_deferred_complete(void *data)
{
	return iomap_dio_complete(data);
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio));
}

/*
 * Set an error in the dio if none is set yet. We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
	struct kiocb *iocb = dio->iocb;

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
	if (!atomic_dec_and_test(&dio->ref))
		goto release_bio;

	/*
	 * Synchronous dio, the submitting task itself will handle any
	 * completion work that's needed after IO. All we need to do is wake
	 * the task.
	 */
	if (dio->wait_for_completion) {
		struct task_struct *waiter = dio->submit.waiter;

		WRITE_ONCE(dio->submit.waiter, NULL);
		blk_wake_io_task(waiter);
		goto release_bio;
	}

	/*
	 * Flagged with IOMAP_DIO_INLINE_COMP, we can complete it inline.
	 */
	if (dio->flags & IOMAP_DIO_INLINE_COMP) {
		WRITE_ONCE(iocb->private, NULL);
		iomap_dio_complete_work(&dio->aio.work);
		goto release_bio;
	}

	/*
	 * If this dio is flagged with IOMAP_DIO_CALLER_COMP, then schedule
	 * our completion that way to avoid an async punt to a workqueue.
	 */
	if (dio->flags & IOMAP_DIO_CALLER_COMP) {
		/* only polled IO cares about private cleared */
		iocb->private = dio;
		iocb->dio_complete = iomap_dio_deferred_complete;

		/*
		 * Invoke ->ki_complete() directly. We've assigned our
		 * dio_complete callback handler, and since the issuer set
		 * IOCB_DIO_CALLER_COMP, we know their ki_complete handler will
		 * notice ->dio_complete being set and will defer calling that
		 * handler until it can be done from a safe task context.
		 *
		 * Note that the 'res' being passed in here is not important
		 * for this case. The actual completion value of the request
		 * will be gotten from dio_complete when that is run by the
		 * issuer.
		 */
		iocb->ki_complete(iocb, 0);
		goto release_bio;
	}

	/*
	 * Async DIO completion that requires filesystem level completion work
	 * gets punted to a work queue to complete as the operation may require
	 * more IO to be issued to finalise filesystem metadata changes or
	 * guarantee data integrity.
	 */
	INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
	queue_work(file_inode(iocb->ki_filp)->i_sb->s_dio_done_wq,
			&dio->aio.work);
release_bio:
	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);

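/*
 * Issuer-side sketch of the IOMAP_DIO_CALLER_COMP contract (an assumption
 * drawn from the comments above, not code in this file): an issuer that set
 * IOCB_DIO_CALLER_COMP is expected to notice ->dio_complete from its
 * ki_complete handler and, once running in a safe task context, do roughly:
 *
 *	if (iocb->dio_complete)
 *		ret = iocb->dio_complete(iocb->private);
 *
 * which funnels into iomap_dio_deferred_complete() and yields the real
 * completion value of the request.
 */
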
static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
		loff_t pos, unsigned len)
{
	struct inode *inode = file_inode(dio->iocb->ki_filp);
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
				  GFP_KERNEL);
	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	__bio_add_page(bio, page, len, 0);
	iomap_dio_submit_bio(iter, dio, bio, pos);
}

/*
 * Figure out the bio's operation flags from the dio request, the
 * mapping, and whether or not we want FUA. Note that we can end up
 * clearing the WRITE_THROUGH flag in the dio request.
 */
static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
		const struct iomap *iomap, bool use_fua)
{
	blk_opf_t opflags = REQ_SYNC | REQ_IDLE;

	if (!(dio->flags & IOMAP_DIO_WRITE))
		return REQ_OP_READ;

	opflags |= REQ_OP_WRITE;
	if (use_fua)
		opflags |= REQ_FUA;
	else
		dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;

	return opflags;
}

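/*
 * For example: a datasync overwrite targeting a device that advertises FUA
 * is issued as REQ_OP_WRITE | REQ_SYNC | REQ_IDLE | REQ_FUA and needs no
 * cache flush at completion, while the same write to a device with a
 * volatile write cache but no FUA support loses IOMAP_DIO_WRITE_THROUGH
 * here and falls back to generic_write_sync() in iomap_dio_complete().
 */
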
static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iter->iomap;
	struct inode *inode = iter->inode;
	unsigned int fs_block_size = i_blocksize(inode), pad;
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	blk_opf_t bio_opf;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;
	size_t orig_count;

	if ((pos | length) & (bdev_logical_block_size(iomap->bdev) - 1) ||
	    !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a
		 * pure data IO that doesn't require any metadata updates
		 * (including after IO completion such as unwritten extent
		 * conversion) and the underlying device either supports FUA
		 * or doesn't have a volatile write cache. This allows us to
		 * avoid cache flushes on IO completion. If we can't use
		 * writethrough and need to sync, disable in-task completions
		 * as dio completion will need to call generic_write_sync()
		 * which will do a blocking fsync / cache flush call.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_THROUGH) &&
		    (bdev_fua(iomap->bdev) || !bdev_write_cache(iomap->bdev)))
			use_fua = true;
		else if (dio->flags & IOMAP_DIO_NEED_SYNC)
			dio->flags &= ~IOMAP_DIO_CALLER_COMP;
	}

	/*
	 * Save the original count and trim the iter to just the extent we
	 * are operating on right now. The iter will be re-expanded once
	 * we are done.
	 */
	orig_count = iov_iter_count(dio->submit.iter);
	iov_iter_truncate(dio->submit.iter, length);

	if (!iov_iter_count(dio->submit.iter))
		goto out;

	/*
	 * We can only do deferred completion for pure overwrites that
	 * don't require additional IO at completion. This rules out
	 * writes that need zeroing or extent conversion, that extend
	 * the file size, or that issue journal IO or cache flushes
	 * during completion processing.
	 */
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_NEED_SYNC) && !use_fua) ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
		dio->flags &= ~IOMAP_DIO_CALLER_COMP;

	/*
	 * The rules for polled IO completions follow the same guidelines as
	 * the ones we set for inline and deferred completions. If none of
	 * those are available for this IO, clear the polled flag.
	 */
	if (!(dio->flags & (IOMAP_DIO_INLINE_COMP|IOMAP_DIO_CALLER_COMP)))
		dio->iocb->ki_flags &= ~IOCB_HIPRI;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos - pad, pad);
	}

	/*
	 * Set the operation flags early so that bio_iov_iter_get_pages
	 * can set up the page vector appropriately for a ZONE_APPEND
	 * operation.
	 */
	bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);

	nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			copied = ret = 0;
			goto out;
		}

		bio = iomap_dio_alloc_bio(iter, dio, nr_pages, bio_opf);
		fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
					  GFP_KERNEL);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO. We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			task_io_account_write(n);
		} else {
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += n;
		copied += n;

		nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
						 BIO_MAX_VECS);
		/*
		 * We can only poll for single bio I/Os.
		 */
		if (nr_pages)
			dio->iocb->ki_flags &= ~IOCB_HIPRI;
		iomap_dio_submit_bio(iter, dio, bio, pos);
		pos += n;
	} while (nr_pages);

	/*
	 * We need to zero out the tail of a sub-block write if the extent
	 * type requires zeroing or the write extends beyond EOF. If we don't
	 * zero the block tail in the latter case, we can expose stale data
	 * via mmap reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
	}
out:
	/* Undo iter limitation to current extent */
	iov_iter_reexpand(dio->submit.iter, orig_count - copied);
	if (copied)
		return copied;
	return ret;
}

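/*
 * Worked example of the zeroing above: with a 4096-byte block size, a
 * 512-byte write at offset 512 into a freshly allocated (IOMAP_F_NEW)
 * extent zeroes bytes 0-511 of the block up front (pad = 512) and, after
 * the data bio, zeroes bytes 1024-4095 (fs_block_size - pad = 3072), so
 * no stale block content is exposed around the written range.
 */
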
static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);

	dio->size += length;
	if (!length)
		return -EFAULT;
	return length;
}

static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iomi->iomap;
	struct iov_iter *iter = dio->submit.iter;
	void *inline_data = iomap_inline_data(iomap, iomi->pos);
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	size_t copied;

	if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
		return -EIO;

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = iomi->inode->i_size;

		if (pos > size)
			memset(iomap_inline_data(iomap, size), 0, pos - size);
		copied = copy_from_iter(inline_data, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(iomi->inode, pos + copied);
			mark_inode_dirty(iomi->inode);
		}
	} else {
		copied = copy_to_iter(inline_data, length, iter);
	}
	dio->size += copied;
	if (!copied)
		return -EFAULT;
	return copied;
}

static loff_t iomap_dio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	switch (iter->iomap.type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_iter(iter, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_iter(iter, dio);
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_MAPPED:
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_INLINE:
		return iomap_dio_inline_iter(iter, dio);
	case IOMAP_DELALLOC:
		/*
		 * DIO is not serialised against mmap() access at all, and so
		 * if the page_mkwrite occurs between the writeback and the
		 * iomap_iter() call in the DIO path, then it will see the
		 * DELALLOC block that the page-mkwrite allocated.
		 */
		pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
				    dio->iocb->ki_filp, current->comm);
		return -EIO;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the
 * IO is being issued as AIO or not. This allows us to optimise pure data
 * writes to use REQ_FUA rather than requiring generic_write_sync() to issue
 * a REQ_FLUSH post write. This is slightly tricky because a single request
 * here can be mapped into multiple disjoint IOs and only a subset of the IOs
 * issued may be pure data writes. In that case, we still need to do a full
 * data sync completion.
 *
 * When page faults are disabled and @dio_flags includes IOMAP_DIO_PARTIAL,
 * __iomap_dio_rw can return a partial result if it encounters a non-resident
 * page in @iter after preparing a transfer. In that case, the non-resident
 * pages can be faulted in and the request resumed with @done_before set to
 * the number of bytes previously transferred. The request will then complete
 * with the correct total number of bytes transferred; this is essential for
 * completing partial requests asynchronously.
 *
 * Returns -ENOTBLK in case of a page invalidation failure for writes. The
 * caller needs to fall back to buffered I/O in this case.
 */
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct iomap_iter iomi = {
		.inode		= inode,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DIRECT,
		.private	= private,
	};
	bool wait_for_completion =
		is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
	struct blk_plug plug;
	struct iomap_dio *dio;
	loff_t ret = 0;

	trace_iomap_dio_rw_begin(iocb, iter, dio_flags, done_before);

	if (!iomi.len)
		return NULL;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return ERR_PTR(-ENOMEM);

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;
	dio->done_before = done_before;

	dio->submit.iter = iter;
	dio->submit.waiter = current;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iomi.flags |= IOMAP_NOWAIT;

	if (iov_iter_rw(iter) == READ) {
		/* reads can always complete inline */
		dio->flags |= IOMAP_DIO_INLINE_COMP;

		if (iomi.pos >= dio->i_size)
			goto out_free_dio;

		if (user_backed_iter(iter))
			dio->flags |= IOMAP_DIO_DIRTY;

		ret = kiocb_write_and_wait(iocb, iomi.len);
		if (ret)
			goto out_free_dio;
	} else {
		iomi.flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/*
		 * Flag as supporting deferred completions, if the issuer
		 * groks it. This can avoid a workqueue punt for writes.
		 * We may later clear this flag if we need to do other IO
		 * as part of this IO completion.
		 */
		if (iocb->ki_flags & IOCB_DIO_CALLER_COMP)
			dio->flags |= IOMAP_DIO_CALLER_COMP;

		if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
			ret = -EAGAIN;
			if (iomi.pos >= dio->i_size ||
			    iomi.pos + iomi.len > dio->i_size)
				goto out_free_dio;
			iomi.flags |= IOMAP_OVERWRITE_ONLY;
		}

		/* for data sync or sync, we need sync completion processing */
		if (iocb_is_dsync(iocb)) {
			dio->flags |= IOMAP_DIO_NEED_SYNC;

			/*
			 * For datasync only writes, we optimistically try
			 * using WRITE_THROUGH for this IO. This flag requires
			 * either FUA writes through the device's write cache,
			 * or a normal write to a device without a volatile
			 * write cache. For the former, any non-FUA write that
			 * occurs will clear this flag, hence we know before
			 * completion whether a cache flush is necessary.
			 */
			if (!(iocb->ki_flags & IOCB_SYNC))
				dio->flags |= IOMAP_DIO_WRITE_THROUGH;
		}

		/*
		 * Try to invalidate cache pages for the range we are writing.
		 * If this invalidation fails, let the caller fall back to
		 * buffered I/O.
		 */
		ret = kiocb_invalidate_pages(iocb, iomi.len);
		if (ret) {
			if (ret != -EAGAIN) {
				trace_iomap_dio_invalidate_fail(inode, iomi.pos,
								iomi.len);
				ret = -ENOTBLK;
			}
			goto out_free_dio;
		}

		if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
			ret = sb_init_dio_done_wq(inode->i_sb);
			if (ret < 0)
				goto out_free_dio;
		}
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	while ((ret = iomap_iter(&iomi, ops)) > 0) {
		iomi.processed = iomap_dio_iter(&iomi, dio);

		/*
		 * We can only poll for single bio I/Os.
		 */
		iocb->ki_flags &= ~IOCB_HIPRI;
	}

	blk_finish_plug(&plug);

	/*
	 * We only report that we've read data up to i_size.
	 * Revert iter to a state corresponding to that as some callers (such
	 * as the splice code) rely on it.
	 */
	if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
		iov_iter_revert(iter, iomi.pos - dio->i_size);

	if (ret == -EFAULT && dio->size && (dio_flags & IOMAP_DIO_PARTIAL)) {
		if (!(iocb->ki_flags & IOCB_NOWAIT))
			wait_for_completion = true;
		ret = 0;
	}

	/* magic error code to fall back to buffered I/O */
	if (ret == -ENOTBLK) {
		wait_for_completion = true;
		ret = 0;
	}
	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were already written through to the
	 * media, we don't need to flush the cache on IO completion. Clear the
	 * sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_THROUGH)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio. There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion) {
			trace_iomap_dio_rw_queued(inode, iomi.pos, iomi.len);
			return ERR_PTR(-EIOCBQUEUED);
		}

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return dio;

out_free_dio:
	kfree(dio);
	if (ret)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(__iomap_dio_rw);

ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct iomap_dio *dio;

	dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private,
			     done_before);
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);
	return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);

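/*
 * Usage sketch (an assumption, not part of this file): a filesystem's
 * direct read path typically wraps iomap_dio_rw() as below, where
 * "my_iomap_ops" stands in for the filesystem's struct iomap_ops and no
 * dio ops, flags, private data, or prior progress are needed:
 *
 *	static ssize_t my_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &my_iomap_ops, NULL, 0, NULL, 0);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 *
 * For writes, a -ENOTBLK return tells the caller to redo the request
 * through the buffered I/O path.
 */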