// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2021 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
#include "trace.h"

#include "../internal.h"

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_INLINE_COMP	(1U << 27)
#define IOMAP_DIO_WRITE_THROUGH	(1U << 28)
#define IOMAP_DIO_NEED_SYNC	(1U << 29)
#define IOMAP_DIO_WRITE		(1U << 30)
#define IOMAP_DIO_DIRTY		(1U << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	size_t			done_before;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
{
	if (dio->dops && dio->dops->bio_set)
		return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,
					GFP_KERNEL, dio->dops->bio_set);
	return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);
}

static void iomap_dio_submit_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, struct bio *bio, loff_t pos)
{
	struct kiocb *iocb = dio->iocb;

	atomic_inc(&dio->ref);

	/* Sync dio can't be polled reliably */
	if ((iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(iocb)) {
		bio_set_polled(bio, iocb);
		WRITE_ONCE(iocb->private, bio);
	}

	if (dio->dops && dio->dops->submit_io)
		dio->dops->submit_io(iter, bio, pos);
	else
		submit_bio(bio);
}

ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're writing.
	 * Either one is a pretty crazy thing to do, so we don't support it
	 * 100%.  If this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as some
	 * filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffer read would
	 * cache zeros from unwritten extents.
	 */
	if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE))
		kiocb_invalidate_post_direct_write(iocb, dio->size);

	inode_dio_end(file_inode(iocb->ki_filp));

	if (ret > 0) {
		iocb->ki_pos += ret;

		/*
		 * If this is a DSYNC write, make sure we push it to stable
		 * storage now that we've written data.
		 */
		if (dio->flags & IOMAP_DIO_NEED_SYNC)
			ret = generic_write_sync(iocb, ret);
		if (ret > 0)
			ret += dio->done_before;
	}
	trace_iomap_dio_complete(iocb, dio->error, ret);
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);
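
/*
 * Illustrative sketch (not part of this file): the ->end_io hook called
 * above is where a filesystem typically finishes per-IO work such as
 * unwritten extent conversion, before the page cache invalidation runs.
 * The "myfs_*" names below are hypothetical:
 *
 *	static int myfs_dio_end_io(struct kiocb *iocb, ssize_t size,
 *			int error, unsigned flags)
 *	{
 *		if (!error && (flags & IOMAP_DIO_UNWRITTEN))
 *			return myfs_convert_unwritten(iocb, size);
 *		return error;
 *	}
 */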

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio));
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
	struct kiocb *iocb = dio->iocb;

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
	if (!atomic_dec_and_test(&dio->ref))
		goto release_bio;

	/*
	 * Synchronous dio: the submitting task itself will handle any
	 * completion work that is needed after IO.  All we need to do is
	 * wake it up.
	 */
	if (dio->wait_for_completion) {
		struct task_struct *waiter = dio->submit.waiter;

		WRITE_ONCE(dio->submit.waiter, NULL);
		blk_wake_io_task(waiter);
		goto release_bio;
	}

	/*
	 * If flagged with IOMAP_DIO_INLINE_COMP, we can complete it inline.
	 */
	if (dio->flags & IOMAP_DIO_INLINE_COMP) {
		WRITE_ONCE(iocb->private, NULL);
		iomap_dio_complete_work(&dio->aio.work);
		goto release_bio;
	}

	/*
	 * Async DIO completion that requires filesystem level completion work
	 * gets punted to a work queue to complete as the operation may require
	 * more IO to be issued to finalise filesystem metadata changes or
	 * guarantee data integrity.
	 */
	INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
	queue_work(file_inode(iocb->ki_filp)->i_sb->s_dio_done_wq,
			&dio->aio.work);
release_bio:
	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);

static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
		loff_t pos, unsigned len)
{
	struct inode *inode = file_inode(dio->iocb->ki_filp);
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = iomap_dio_alloc_bio(iter, dio, 1,
				  REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
				  GFP_KERNEL);
	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	__bio_add_page(bio, page, len, 0);
	iomap_dio_submit_bio(iter, dio, bio, pos);
}

/*
 * Figure out the bio's operation flags from the dio request, the
 * mapping, and whether or not we want FUA.  Note that we can end up
 * clearing the WRITE_THROUGH flag in the dio request.
 */
static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
		const struct iomap *iomap, bool use_fua)
{
	blk_opf_t opflags = REQ_SYNC | REQ_IDLE;

	if (!(dio->flags & IOMAP_DIO_WRITE))
		return REQ_OP_READ;

	opflags |= REQ_OP_WRITE;
	if (use_fua)
		opflags |= REQ_FUA;
	else
		dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;

	return opflags;
}
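
/*
 * Worked example (informational): for a pure overwrite issued with
 * IOMAP_DIO_WRITE_THROUGH set and use_fua true, the result is
 * REQ_OP_WRITE | REQ_SYNC | REQ_IDLE | REQ_FUA.  For the same write
 * without FUA, IOMAP_DIO_WRITE_THROUGH is cleared here, so by completion
 * time we know whether a cache flush is still required.
 */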

static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iter->iomap;
	struct inode *inode = iter->inode;
	unsigned int fs_block_size = i_blocksize(inode), pad;
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	blk_opf_t bio_opf;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;
	size_t orig_count;

	if ((pos | length) & (bdev_logical_block_size(iomap->bdev) - 1) ||
	    !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a
		 * pure data IO that doesn't require any metadata updates
		 * (including after IO completion such as unwritten extent
		 * conversion) and the underlying device either supports FUA or
		 * doesn't have a volatile write cache.  This allows us to
		 * avoid cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_THROUGH) &&
		    (bdev_fua(iomap->bdev) || !bdev_write_cache(iomap->bdev)))
			use_fua = true;
	}

	/*
	 * Save the original count and trim the iter to just the extent we
	 * are operating on right now.  The iter will be re-expanded once
	 * we are done.
	 */
	orig_count = iov_iter_count(dio->submit.iter);
	iov_iter_truncate(dio->submit.iter, length);

	if (!iov_iter_count(dio->submit.iter))
		goto out;

	/*
	 * We can only poll for single bio I/Os.
	 */
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
		dio->iocb->ki_flags &= ~IOCB_HIPRI;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos - pad, pad);
	}

	/*
	 * Set the operation flags early so that bio_iov_iter_get_pages
	 * can set up the page vector appropriately for a ZONE_APPEND
	 * operation.
	 */
	bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);

	nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
	do {
		size_t n;

		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			copied = ret = 0;
			goto out;
		}

		bio = iomap_dio_alloc_bio(iter, dio, nr_pages, bio_opf);
		fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
					  GFP_KERNEL);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO.  We must
			 * fall through to the sub-block tail zeroing here,
			 * otherwise this short IO may expose stale data in the
			 * tail of the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			task_io_account_write(n);
		} else {
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += n;
		copied += n;

		nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
						 BIO_MAX_VECS);
		/*
		 * We can only poll for single bio I/Os.
		 */
		if (nr_pages)
			dio->iocb->ki_flags &= ~IOCB_HIPRI;
		iomap_dio_submit_bio(iter, dio, bio, pos);
		pos += n;
	} while (nr_pages);

	/*
	 * We need to zero out the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF.  If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
	}
out:
	/* Undo iter limitation to current extent */
	iov_iter_reexpand(dio->submit.iter, orig_count - copied);
	if (copied)
		return copied;
	return ret;
}
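
/*
 * Worked example (informational): with a 4096-byte filesystem block on a
 * 512-byte logical block device and a freshly allocated extent, a write of
 * bytes 512..2559 gives pad = 512 at the head (zero bytes 0..511) and
 * fs_block_size - pad = 1536 at the tail (zero bytes 2560..4095), so the
 * whole block is initialised even though only part of it carries data.
 */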

static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);

	dio->size += length;
	if (!length)
		return -EFAULT;
	return length;
}

static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iomi->iomap;
	struct iov_iter *iter = dio->submit.iter;
	void *inline_data = iomap_inline_data(iomap, iomi->pos);
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	size_t copied;

	if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
		return -EIO;

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = iomi->inode->i_size;

		if (pos > size)
			memset(iomap_inline_data(iomap, size), 0, pos - size);
		copied = copy_from_iter(inline_data, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(iomi->inode, pos + copied);
			mark_inode_dirty(iomi->inode);
		}
	} else {
		copied = copy_to_iter(inline_data, length, iter);
	}
	dio->size += copied;
	if (!copied)
		return -EFAULT;
	return copied;
}

static loff_t iomap_dio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	switch (iter->iomap.type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_iter(iter, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_iter(iter, dio);
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_MAPPED:
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_INLINE:
		return iomap_dio_inline_iter(iter, dio);
	case IOMAP_DELALLOC:
		/*
		 * DIO is not serialised against mmap() access at all, and so
		 * if the page_mkwrite occurs between the writeback and the
		 * iomap_iter() call in the DIO path, then it will see the
		 * DELALLOC block that the page-mkwrite allocated.
		 */
		pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
				    dio->iocb->ki_filp, current->comm);
		return -EIO;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}
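
/*
 * Note (informational): the dispatch above means DIO reads from holes and
 * unwritten extents never touch the device; iomap_dio_hole_iter() fills the
 * user buffer with zeros via iov_iter_zero(), e.g. a 4 KiB read over a hole
 * returns 4 KiB of zeros without issuing a bio.
 */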

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the
 * IO is being issued as AIO or not.  This allows us to optimise pure data
 * writes to use REQ_FUA rather than requiring generic_write_sync() to issue
 * a REQ_FLUSH post write.  This is slightly tricky because a single request
 * here can be mapped into multiple disjoint IOs and only a subset of the IOs
 * issued may be pure data writes.  In that case, we still need to do a full
 * data sync completion.
 *
 * When page faults are disabled and @dio_flags includes IOMAP_DIO_PARTIAL,
 * __iomap_dio_rw can return a partial result if it encounters a non-resident
 * page in @iter after preparing a transfer.  In that case, the non-resident
 * pages can be faulted in and the request resumed with @done_before set to
 * the number of bytes previously transferred.  The request will then complete
 * with the correct total number of bytes transferred; this is essential for
 * completing partial requests asynchronously.
 *
 * Returns -ENOTBLK in case of a page invalidation failure for writes.  The
 * caller needs to fall back to buffered I/O in this case.
 */
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct iomap_iter iomi = {
		.inode		= inode,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DIRECT,
		.private	= private,
	};
	bool wait_for_completion =
		is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
	struct blk_plug plug;
	struct iomap_dio *dio;
	loff_t ret = 0;

	trace_iomap_dio_rw_begin(iocb, iter, dio_flags, done_before);

	if (!iomi.len)
		return NULL;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return ERR_PTR(-ENOMEM);

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;
	dio->done_before = done_before;

	dio->submit.iter = iter;
	dio->submit.waiter = current;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iomi.flags |= IOMAP_NOWAIT;

	if (iov_iter_rw(iter) == READ) {
		/* reads can always complete inline */
		dio->flags |= IOMAP_DIO_INLINE_COMP;

		if (iomi.pos >= dio->i_size)
			goto out_free_dio;

		if (user_backed_iter(iter))
			dio->flags |= IOMAP_DIO_DIRTY;

		ret = kiocb_write_and_wait(iocb, iomi.len);
		if (ret)
			goto out_free_dio;
	} else {
		iomi.flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
			ret = -EAGAIN;
			if (iomi.pos >= dio->i_size ||
			    iomi.pos + iomi.len > dio->i_size)
				goto out_free_dio;
			iomi.flags |= IOMAP_OVERWRITE_ONLY;
		}

		/* for data sync or sync, we need sync completion processing */
		if (iocb_is_dsync(iocb)) {
			dio->flags |= IOMAP_DIO_NEED_SYNC;

			/*
			 * For datasync only writes, we optimistically try
			 * using WRITE_THROUGH for this IO.  This flag requires
			 * either FUA writes through the device's write cache,
			 * or a normal write to a device without a volatile
			 * write cache.  For the former, any non-FUA write that
			 * occurs will clear this flag, hence we know before
			 * completion whether a cache flush is necessary.
			 */
			if (!(iocb->ki_flags & IOCB_SYNC))
				dio->flags |= IOMAP_DIO_WRITE_THROUGH;
		}

		/*
		 * Try to invalidate cache pages for the range we are writing.
		 * If this invalidation fails, let the caller fall back to
		 * buffered I/O.
		 */
		ret = kiocb_invalidate_pages(iocb, iomi.len);
		if (ret) {
			if (ret != -EAGAIN) {
				trace_iomap_dio_invalidate_fail(inode, iomi.pos,
								iomi.len);
				ret = -ENOTBLK;
			}
			goto out_free_dio;
		}

		if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
			ret = sb_init_dio_done_wq(inode->i_sb);
			if (ret < 0)
				goto out_free_dio;
		}
	}

	inode_dio_begin(inode);
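
	/*
	 * Note: inode_dio_begin() pairs with the inode_dio_end() in
	 * iomap_dio_complete(); it elevates i_dio_count so that operations
	 * such as truncate can wait for in-flight DIO via inode_dio_wait().
	 */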

	blk_start_plug(&plug);
	while ((ret = iomap_iter(&iomi, ops)) > 0) {
		iomi.processed = iomap_dio_iter(&iomi, dio);

		/*
		 * We can only poll for single bio I/Os.
		 */
		iocb->ki_flags &= ~IOCB_HIPRI;
	}

	blk_finish_plug(&plug);

	/*
	 * We only report that we've read data up to i_size.
	 * Revert iter to a state corresponding to that as some callers (such
	 * as the splice code) rely on it.
	 */
	if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
		iov_iter_revert(iter, iomi.pos - dio->i_size);

	if (ret == -EFAULT && dio->size && (dio_flags & IOMAP_DIO_PARTIAL)) {
		if (!(iocb->ki_flags & IOCB_NOWAIT))
			wait_for_completion = true;
		ret = 0;
	}

	/* magic error code to fall back to buffered I/O */
	if (ret == -ENOTBLK) {
		wait_for_completion = true;
		ret = 0;
	}
	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were already written through to the
	 * media, we don't need to flush the cache on IO completion.  Clear the
	 * sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_THROUGH)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion) {
			trace_iomap_dio_rw_queued(inode, iomi.pos, iomi.len);
			return ERR_PTR(-EIOCBQUEUED);
		}

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return dio;

out_free_dio:
	kfree(dio);
	if (ret)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(__iomap_dio_rw);
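
/*
 * Illustrative caller-side sketch (not part of this file; names are
 * hypothetical and error handling is condensed): resuming a partial read
 * as described in the comment above __iomap_dio_rw():
 *
 *	size_t done = 0;
 *	ssize_t ret;
 *retry:
 *	ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL,
 *			IOMAP_DIO_PARTIAL, NULL, done);
 *	if (ret > 0)
 *		done = ret;
 *	if ((ret == -EFAULT || (ret > 0 && iov_iter_count(to))) &&
 *	    fault_in_iov_iter_writeable(to, PAGE_SIZE) == 0)
 *		goto retry;
 *
 * Note that the return value is cumulative because @done_before is added
 * back in iomap_dio_complete(), so the caller tracks, not sums, it.
 */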

ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct iomap_dio *dio;

	dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private,
			     done_before);
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);
	return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
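
/*
 * Illustrative sketch (not part of this file): typical synchronous use from
 * a filesystem's ->write_iter method, handling the documented -ENOTBLK
 * return by falling back to buffered I/O.  All "myfs_*" names are
 * hypothetical:
 *
 *	static ssize_t myfs_file_dio_write(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		ssize_t ret;
 *
 *		ret = iomap_dio_rw(iocb, from, &myfs_iomap_ops,
 *				&myfs_dio_write_ops, 0, NULL, 0);
 *		if (ret == -ENOTBLK)
 *			ret = myfs_buffered_write(iocb, from);
 *		return ret;
 *	}
 */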