// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2021 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
#include "trace.h"

#include "../internal.h"

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	size_t			done_before;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct bio		*poll_bio;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

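/*
 * Take a dio reference for the bio and send it down to the block layer,
 * either directly or via the filesystem's optional ->submit_io hook.
 * Only asynchronous kiocbs are eligible for polled completion.
 */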
static void iomap_dio_submit_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, struct bio *bio, loff_t pos)
{
	atomic_inc(&dio->ref);

	/* Sync dio can't be polled reliably */
	if ((dio->iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(dio->iocb)) {
		bio_set_polled(bio, dio->iocb);
		dio->submit.poll_bio = bio;
	}

	if (dio->dops && dio->dops->submit_io)
		dio->dops->submit_io(iter, bio, pos);
	else
		submit_bio(bio);
}

ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're writing.
	 * Either one is a pretty crazy thing to do, so we don't support it
	 * 100%.  If this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as some
	 * filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffered read would
	 * cache zeros from unwritten extents.
	 */
	if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE) &&
	    inode->i_mapping->nrpages) {
		int err;

		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	inode_dio_end(file_inode(iocb->ki_filp));
	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	if (ret > 0)
		ret += dio->done_before;

	kfree(dio);

	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio));
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

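/*
 * bio completion handler: record any error, and on the drop of the final
 * reference either wake the synchronous waiter, defer AIO write completion
 * to the dio workqueue (completion may need to sync or convert unwritten
 * extents), or complete AIO reads inline.
 */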
static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			WRITE_ONCE(dio->iocb->private, NULL);
			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			WRITE_ONCE(dio->iocb->private, NULL);
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
		loff_t pos, unsigned len)
{
	struct inode *inode = file_inode(dio->iocb->ki_filp);
	struct page *page = ZERO_PAGE(0);
	int flags = REQ_SYNC | REQ_IDLE;
	struct bio *bio;

	bio = bio_alloc(iter->iomap.bdev, 1, REQ_OP_WRITE | flags, GFP_KERNEL);
	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
				  GFP_KERNEL);
	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	iomap_dio_submit_bio(iter, dio, bio, pos);
}

/*
 * Figure out the bio's operation flags from the dio request, the
 * mapping, and whether or not we want FUA.  Note that we can end up
 * clearing the WRITE_FUA flag in the dio request.
 */
static inline unsigned int iomap_dio_bio_opflags(struct iomap_dio *dio,
		const struct iomap *iomap, bool use_fua)
{
	unsigned int opflags = REQ_SYNC | REQ_IDLE;

	if (!(dio->flags & IOMAP_DIO_WRITE)) {
		WARN_ON_ONCE(iomap->flags & IOMAP_F_ZONE_APPEND);
		return REQ_OP_READ;
	}

	if (iomap->flags & IOMAP_F_ZONE_APPEND)
		opflags |= REQ_OP_ZONE_APPEND;
	else
		opflags |= REQ_OP_WRITE;

	if (use_fua)
		opflags |= REQ_FUA;
	else
		dio->flags &= ~IOMAP_DIO_WRITE_FUA;

	return opflags;
}

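/*
 * Translate the current extent into one or more bios and submit them.
 * Handles sub-block zeroing for new or unwritten extents, opportunistic
 * FUA writes, and trimming the iter to the extent being mapped.
 */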
static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iter->iomap;
	struct inode *inode = iter->inode;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	unsigned int bio_opf;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;
	size_t orig_count;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device supports FUA.  This allows us to avoid
		 * cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED | IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) && bdev_fua(iomap->bdev))
			use_fua = true;
	}

	/*
	 * Save the original count and trim the iter to just the extent we
	 * are operating on right now.  The iter will be re-expanded once
	 * we are done.
	 */
	orig_count = iov_iter_count(dio->submit.iter);
	iov_iter_truncate(dio->submit.iter, length);

	if (!iov_iter_count(dio->submit.iter))
		goto out;

	/*
	 * We can only poll for single bio I/Os.
	 */
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
		dio->iocb->ki_flags &= ~IOCB_HIPRI;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos - pad, pad);
	}

	/*
	 * Set the operation flags early so that bio_iov_iter_get_pages
	 * can set up the page vector appropriately for a ZONE_APPEND
	 * operation.
	 */
	bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);

	nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
	do {
		size_t n;

		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			copied = ret = 0;
			goto out;
		}

		bio = bio_alloc(iomap->bdev, nr_pages, bio_opf, GFP_KERNEL);
		fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
					  GFP_KERNEL);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO.  We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			task_io_account_write(n);
		} else {
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += n;
		copied += n;

		nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
						 BIO_MAX_VECS);
		/*
		 * We can only poll for single bio I/Os.
		 */
		if (nr_pages)
			dio->iocb->ki_flags &= ~IOCB_HIPRI;
		iomap_dio_submit_bio(iter, dio, bio, pos);
		pos += n;
	} while (nr_pages);

	/*
	 * We need to zero out the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF.  If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
	}
out:
	/* Undo iter limitation to current extent */
	iov_iter_reexpand(dio->submit.iter, orig_count - copied);
	if (copied)
		return copied;
	return ret;
}

static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);

	dio->size += length;
	if (!length)
		return -EFAULT;
	return length;
}

static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iomi->iomap;
	struct iov_iter *iter = dio->submit.iter;
	void *inline_data = iomap_inline_data(iomap, iomi->pos);
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	size_t copied;

	if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
		return -EIO;

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = iomi->inode->i_size;

		if (pos > size)
			memset(iomap_inline_data(iomap, size), 0, pos - size);
		copied = copy_from_iter(inline_data, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(iomi->inode, pos + copied);
			mark_inode_dirty(iomi->inode);
		}
	} else {
		copied = copy_to_iter(inline_data, length, iter);
	}
	dio->size += copied;
	if (!copied)
		return -EFAULT;
	return copied;
}

static loff_t iomap_dio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	switch (iter->iomap.type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_iter(iter, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_iter(iter, dio);
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_MAPPED:
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_INLINE:
		return iomap_dio_inline_iter(iter, dio);
	case IOMAP_DELALLOC:
		/*
		 * DIO is not serialised against mmap() access at all, and so
		 * if the page_mkwrite occurs between the writeback and the
		 * iomap_iter() call in the DIO path, then it will see the
		 * DELALLOC block that the page-mkwrite allocated.
		 */
		pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
				    dio->iocb->ki_filp, current->comm);
		return -EIO;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the
 * IO is being issued as AIO or not.  This allows us to optimise pure data
 * writes to use REQ_FUA rather than requiring generic_write_sync() to issue
 * a REQ_FLUSH post write.  This is slightly tricky because a single request
 * here can be mapped into multiple disjoint IOs and only a subset of the IOs
 * issued may be pure data writes.  In that case, we still need to do a full
 * data sync completion.
 *
 * When page faults are disabled and @dio_flags includes IOMAP_DIO_PARTIAL,
 * __iomap_dio_rw can return a partial result if it encounters a non-resident
 * page in @iter after preparing a transfer.  In that case, the non-resident
 * pages can be faulted in and the request resumed with @done_before set to
 * the number of bytes previously transferred.  The request will then complete
 * with the correct total number of bytes transferred; this is essential for
 * completing partial requests asynchronously.
 *
 * Returns -ENOTBLK in case of a page invalidation failure for writes.  The
 * caller needs to fall back to buffered I/O in this case.
 */
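/*
 * For illustration, a caller opting in to IOMAP_DIO_PARTIAL might drive the
 * retry loop for a direct read roughly as follows.  This is a sketch only,
 * loosely modelled on how gfs2 uses this interface; real callers add their
 * own locking and limit the fault-in window instead of using PAGE_SIZE, and
 * a write path would use fault_in_iov_iter_readable() instead:
 *
 *	size_t done = 0;
 *	ssize_t ret;
 *
 *	do {
 *		ret = iomap_dio_rw(iocb, to, ops, NULL,
 *				   IOMAP_DIO_PARTIAL, done);
 *		if (ret > 0)
 *			done = ret;
 *		if (ret < 0 && ret != -EFAULT)
 *			break;
 *		if (!iov_iter_count(to))
 *			break;
 *	} while (!fault_in_iov_iter_writeable(to, PAGE_SIZE));
 *
 * On each pass iomap_dio_rw() returns the running total including
 * @done_before, so the final return value is the complete transfer size.
 */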
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, size_t done_before)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct iomap_iter iomi = {
		.inode		= inode,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DIRECT,
	};
	loff_t end = iomi.pos + iomi.len - 1, ret = 0;
	bool wait_for_completion =
		is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
	struct blk_plug plug;
	struct iomap_dio *dio;

	if (!iomi.len)
		return NULL;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return ERR_PTR(-ENOMEM);

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;
	dio->done_before = done_before;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.poll_bio = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (iomi.pos >= dio->i_size)
			goto out_free_dio;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_needs_writeback(mapping, iomi.pos,
					end)) {
				ret = -EAGAIN;
				goto out_free_dio;
			}
			iomi.flags |= IOMAP_NOWAIT;
		}

		if (iter_is_iovec(iter))
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		iomi.flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_has_page(mapping, iomi.pos, end)) {
				ret = -EAGAIN;
				goto out_free_dio;
			}
			iomi.flags |= IOMAP_NOWAIT;
		}

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA
		 * for this IO.  Any non-FUA write that occurs will clear this
		 * flag, hence we know before completion whether a cache flush
		 * is necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
		ret = -EAGAIN;
		if (iomi.pos >= dio->i_size ||
		    iomi.pos + iomi.len > dio->i_size)
			goto out_free_dio;
		iomi.flags |= IOMAP_OVERWRITE_ONLY;
	}

	ret = filemap_write_and_wait_range(mapping, iomi.pos, end);
	if (ret)
		goto out_free_dio;

	if (iov_iter_rw(iter) == WRITE) {
		/*
		 * Try to invalidate cache pages for the range we are writing.
		 * If this invalidation fails, let the caller fall back to
		 * buffered I/O.
		 */
		if (invalidate_inode_pages2_range(mapping,
				iomi.pos >> PAGE_SHIFT, end >> PAGE_SHIFT)) {
			trace_iomap_dio_invalidate_fail(inode, iomi.pos,
							iomi.len);
			ret = -ENOTBLK;
			goto out_free_dio;
		}

		if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
			ret = sb_init_dio_done_wq(inode->i_sb);
			if (ret < 0)
				goto out_free_dio;
		}
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	while ((ret = iomap_iter(&iomi, ops)) > 0) {
		iomi.processed = iomap_dio_iter(&iomi, dio);

		/*
		 * We can only poll for single bio I/Os.
		 */
		iocb->ki_flags &= ~IOCB_HIPRI;
	}
	blk_finish_plug(&plug);

	/*
	 * We only report that we've read data up to i_size.  Revert iter to a
	 * state corresponding to that as some callers (such as the splice
	 * code) rely on it.
	 */
	if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
		iov_iter_revert(iter, iomi.pos - dio->i_size);

	if (ret == -EFAULT && dio->size && (dio_flags & IOMAP_DIO_PARTIAL)) {
		if (!(iocb->ki_flags & IOCB_NOWAIT))
			wait_for_completion = true;
		ret = 0;
	}

	/* magic error code to fall back to buffered I/O */
	if (ret == -ENOTBLK) {
		wait_for_completion = true;
		ret = 0;
	}
	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion.  Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->private, dio->submit.poll_bio);

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return ERR_PTR(-EIOCBQUEUED);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return dio;

out_free_dio:
	kfree(dio);
	if (ret)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(__iomap_dio_rw);

ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, size_t done_before)
{
	struct iomap_dio *dio;

	dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, done_before);
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);
	return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
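
/*
 * For reference, the simplest way a filesystem wires this up is to call
 * iomap_dio_rw() from its ->read_iter/->write_iter methods when IOCB_DIRECT
 * is set.  A minimal sketch, assuming a filesystem that provides
 * "myfs_iomap_ops" (a hypothetical name) and needs no dio completion hooks:
 *
 *	static ssize_t myfs_file_read_iter(struct kiocb *iocb,
 *					   struct iov_iter *to)
 *	{
 *		if (iocb->ki_flags & IOCB_DIRECT)
 *			return iomap_dio_rw(iocb, to, &myfs_iomap_ops,
 *					    NULL, 0, 0);
 *		return generic_file_read_iter(iocb, to);
 *	}
 *
 * Real filesystems take the appropriate inode locks around the call and, on
 * the write side, handle the -ENOTBLK return by falling back to buffered I/O.
 */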