// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2021 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
#include "trace.h"

#include "../internal.h"

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_THROUGH	(1U << 28)
#define IOMAP_DIO_NEED_SYNC	(1U << 29)
#define IOMAP_DIO_WRITE		(1U << 30)
#define IOMAP_DIO_DIRTY		(1U << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	size_t			done_before;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
{
	if (dio->dops && dio->dops->bio_set)
		return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,
					GFP_KERNEL, dio->dops->bio_set);
	return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);
}

static void iomap_dio_submit_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, struct bio *bio, loff_t pos)
{
	struct kiocb *iocb = dio->iocb;

	atomic_inc(&dio->ref);

	/* Sync dio can't be polled reliably */
	if ((iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(iocb)) {
		bio_set_polled(bio, iocb);
		WRITE_ONCE(iocb->private, bio);
	}

	if (dio->dops && dio->dops->submit_io)
		dio->dops->submit_io(iter, bio, pos);
	else
		submit_bio(bio);
}

ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're writing.
	 * Either one is a pretty crazy thing to do, so we don't support it
	 * 100%.  If this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as some
	 * filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffered read would
	 * cache zeros from unwritten extents.
	 */
	if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE))
		kiocb_invalidate_post_direct_write(iocb, dio->size);

	inode_dio_end(file_inode(iocb->ki_filp));

	if (ret > 0) {
		iocb->ki_pos += ret;

		/*
		 * If this is a DSYNC write, make sure we push it to stable
		 * storage now that we've written data.
		 */
		if (dio->flags & IOMAP_DIO_NEED_SYNC)
			ret = generic_write_sync(iocb, ret);
		if (ret > 0)
			ret += dio->done_before;
	}
	trace_iomap_dio_complete(iocb, dio->error, ret);
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio));
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
	struct kiocb *iocb = dio->iocb;

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
	if (!atomic_dec_and_test(&dio->ref))
		goto release_bio;

	/*
	 * Synchronous dio: the task itself will handle any completion work
	 * that is needed after IO.  All we need to do is wake the task.
	 */
	if (dio->wait_for_completion) {
		struct task_struct *waiter = dio->submit.waiter;

		WRITE_ONCE(dio->submit.waiter, NULL);
		blk_wake_io_task(waiter);
		goto release_bio;
	}

	/* Read completion can always complete inline. */
	if (!(dio->flags & IOMAP_DIO_WRITE)) {
		WRITE_ONCE(iocb->private, NULL);
		iomap_dio_complete_work(&dio->aio.work);
		goto release_bio;
	}

	/*
	 * Async DIO completion that requires filesystem level completion work
	 * gets punted to a work queue to complete as the operation may require
	 * more IO to be issued to finalise filesystem metadata changes or
	 * guarantee data integrity.
	 */
	INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
	queue_work(file_inode(iocb->ki_filp)->i_sb->s_dio_done_wq,
			&dio->aio.work);
release_bio:
	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);

static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
		loff_t pos, unsigned len)
{
	struct inode *inode = file_inode(dio->iocb->ki_filp);
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
				  GFP_KERNEL);
	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	__bio_add_page(bio, page, len, 0);
	iomap_dio_submit_bio(iter, dio, bio, pos);
}

/*
 * Figure out the bio's operation flags from the dio request, the
 * mapping, and whether or not we want FUA.  Note that we can end up
 * clearing the WRITE_THROUGH flag in the dio request.
 */
static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
		const struct iomap *iomap, bool use_fua)
{
	blk_opf_t opflags = REQ_SYNC | REQ_IDLE;

	if (!(dio->flags & IOMAP_DIO_WRITE))
		return REQ_OP_READ;

	opflags |= REQ_OP_WRITE;
	if (use_fua)
		opflags |= REQ_FUA;
	else
		dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;

	return opflags;
}

static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iter->iomap;
	struct inode *inode = iter->inode;
	unsigned int fs_block_size = i_blocksize(inode), pad;
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	blk_opf_t bio_opf;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;
	size_t orig_count;

	if ((pos | length) & (bdev_logical_block_size(iomap->bdev) - 1) ||
	    !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device either supports FUA or doesn't have
		 * a volatile write cache.  This allows us to avoid cache
		 * flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_THROUGH) &&
		    (bdev_fua(iomap->bdev) || !bdev_write_cache(iomap->bdev)))
			use_fua = true;
	}

	/*
	 * Save the original count and trim the iter to just the extent we
	 * are operating on right now.  The iter will be re-expanded once
	 * we are done.
	 */
	orig_count = iov_iter_count(dio->submit.iter);
	iov_iter_truncate(dio->submit.iter, length);

	if (!iov_iter_count(dio->submit.iter))
		goto out;

	/*
	 * We can only poll for single bio I/Os.
	 */
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
		dio->iocb->ki_flags &= ~IOCB_HIPRI;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos - pad, pad);
	}

	/*
	 * Set the operation flags early so that bio_iov_iter_get_pages
	 * can set up the page vector appropriately for a ZONE_APPEND
	 * operation.
	 */
	bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);

	nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
	do {
		size_t n;

		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			copied = ret = 0;
			goto out;
		}

		bio = iomap_dio_alloc_bio(iter, dio, nr_pages, bio_opf);
		fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
					  GFP_KERNEL);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO.
			 * We must fall through to the sub-block tail zeroing
			 * here, otherwise this short IO may expose stale data
			 * in the tail of the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			task_io_account_write(n);
		} else {
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += n;
		copied += n;

		nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
						 BIO_MAX_VECS);
		/*
		 * We can only poll for single bio I/Os.
		 */
		if (nr_pages)
			dio->iocb->ki_flags &= ~IOCB_HIPRI;
		iomap_dio_submit_bio(iter, dio, bio, pos);
		pos += n;
	} while (nr_pages);

	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF.  If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
	}
out:
	/* Undo iter limitation to current extent */
	iov_iter_reexpand(dio->submit.iter, orig_count - copied);
	if (copied)
		return copied;
	return ret;
}

static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);

	dio->size += length;
	if (!length)
		return -EFAULT;
	return length;
}

static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iomi->iomap;
	struct iov_iter *iter = dio->submit.iter;
	void *inline_data = iomap_inline_data(iomap, iomi->pos);
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	size_t copied;

	if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
		return -EIO;

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = iomi->inode->i_size;

		if (pos > size)
			memset(iomap_inline_data(iomap, size), 0, pos - size);
		copied = copy_from_iter(inline_data, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(iomi->inode, pos + copied);
			mark_inode_dirty(iomi->inode);
		}
	} else {
		copied = copy_to_iter(inline_data, length, iter);
	}
	dio->size += copied;
	if (!copied)
		return -EFAULT;
	return copied;
}

static loff_t iomap_dio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	switch (iter->iomap.type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_iter(iter, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_iter(iter, dio);
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_MAPPED:
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_INLINE:
		return iomap_dio_inline_iter(iter, dio);
	case IOMAP_DELALLOC:
		/*
		 * DIO is not serialised against mmap() access at all, and so
		 * if the page_mkwrite occurs between the writeback and the
		 * iomap_iter() call in the DIO path, then it will see the
		 * DELALLOC block that the page-mkwrite allocated.
		 */
		pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
				    dio->iocb->ki_filp, current->comm);
		return -EIO;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the
 * IO is being issued as AIO or not.  This allows us to optimise pure data
 * writes to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write.  This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes.  In that case, we still need to do a full data sync
 * completion.
 *
 * When page faults are disabled and @dio_flags includes IOMAP_DIO_PARTIAL,
 * __iomap_dio_rw can return a partial result if it encounters a non-resident
 * page in @iter after preparing a transfer.  In that case, the non-resident
 * pages can be faulted in and the request resumed with @done_before set to the
 * number of bytes previously transferred.  The request will then complete with
 * the correct total number of bytes transferred; this is essential for
 * completing partial requests asynchronously.
 *
 * Returns -ENOTBLK in case of a page invalidation failure for writes.  The
 * caller needs to fall back to buffered I/O in this case.
 */
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct iomap_iter iomi = {
		.inode		= inode,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DIRECT,
		.private	= private,
	};
	bool wait_for_completion =
		is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
	struct blk_plug plug;
	struct iomap_dio *dio;
	loff_t ret = 0;

	trace_iomap_dio_rw_begin(iocb, iter, dio_flags, done_before);

	if (!iomi.len)
		return NULL;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return ERR_PTR(-ENOMEM);

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;
	dio->done_before = done_before;

	dio->submit.iter = iter;
	dio->submit.waiter = current;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iomi.flags |= IOMAP_NOWAIT;

	if (iov_iter_rw(iter) == READ) {
		if (iomi.pos >= dio->i_size)
			goto out_free_dio;

		if (user_backed_iter(iter))
			dio->flags |= IOMAP_DIO_DIRTY;

		ret = kiocb_write_and_wait(iocb, iomi.len);
		if (ret)
			goto out_free_dio;
	} else {
		iomi.flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
			ret = -EAGAIN;
			if (iomi.pos >= dio->i_size ||
			    iomi.pos + iomi.len > dio->i_size)
				goto out_free_dio;
			iomi.flags |= IOMAP_OVERWRITE_ONLY;
		}

		/* for data sync or sync, we need sync completion processing */
		if (iocb_is_dsync(iocb)) {
			dio->flags |= IOMAP_DIO_NEED_SYNC;

			/*
			 * For datasync only writes, we optimistically try
			 * using WRITE_THROUGH for this IO.  This flag requires
			 * either FUA writes through the device's write cache,
			 * or a normal write to a device without a volatile
			 * write cache.
			 * For the former, any non-FUA write that occurs will
			 * clear this flag, hence we know before completion
			 * whether a cache flush is necessary.
			 */
			if (!(iocb->ki_flags & IOCB_SYNC))
				dio->flags |= IOMAP_DIO_WRITE_THROUGH;
		}

		/*
		 * Try to invalidate cache pages for the range we are writing.
		 * If this invalidation fails, let the caller fall back to
		 * buffered I/O.
		 */
		ret = kiocb_invalidate_pages(iocb, iomi.len);
		if (ret) {
			if (ret != -EAGAIN) {
				trace_iomap_dio_invalidate_fail(inode, iomi.pos,
								iomi.len);
				ret = -ENOTBLK;
			}
			goto out_free_dio;
		}

		if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
			ret = sb_init_dio_done_wq(inode->i_sb);
			if (ret < 0)
				goto out_free_dio;
		}
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	while ((ret = iomap_iter(&iomi, ops)) > 0) {
		iomi.processed = iomap_dio_iter(&iomi, dio);

		/*
		 * We can only poll for single bio I/Os.
		 */
		iocb->ki_flags &= ~IOCB_HIPRI;
	}

	blk_finish_plug(&plug);

	/*
	 * We only report that we've read data up to i_size.
	 * Revert iter to a state corresponding to that as some callers (such
	 * as the splice code) rely on it.
	 */
	if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
		iov_iter_revert(iter, iomi.pos - dio->i_size);

	if (ret == -EFAULT && dio->size && (dio_flags & IOMAP_DIO_PARTIAL)) {
		if (!(iocb->ki_flags & IOCB_NOWAIT))
			wait_for_completion = true;
		ret = 0;
	}

	/* magic error code to fall back to buffered I/O */
	if (ret == -ENOTBLK) {
		wait_for_completion = true;
		ret = 0;
	}
	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were already written through to the
	 * media, we don't need to flush the cache on IO completion.  Clear the
	 * sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_THROUGH)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion) {
			trace_iomap_dio_rw_queued(inode, iomi.pos, iomi.len);
			return ERR_PTR(-EIOCBQUEUED);
		}

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return dio;

out_free_dio:
	kfree(dio);
	if (ret)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(__iomap_dio_rw);

ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct iomap_dio *dio;

	dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private,
			     done_before);
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);
	return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
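
/*
 * Usage sketch (illustrative only, not part of this file, kept under #if 0):
 * a minimal ->write_iter built around iomap_dio_rw() that honours the
 * -ENOTBLK convention documented above by falling back to buffered I/O.
 * "example_iomap_ops" and example_buffered_write() are hypothetical
 * placeholders; real callers (for example the XFS and ext4 direct I/O paths)
 * add their own locking, extent alignment checks and iomap_dio_ops.
 */
#if 0	/* example only, never compiled */
static ssize_t example_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		inode_lock(inode);
		ret = iomap_dio_rw(iocb, from, &example_iomap_ops, NULL,
				   0, NULL, 0);
		inode_unlock(inode);

		/* -ENOTBLK: page invalidation failed, use the buffered path */
		if (ret != -ENOTBLK)
			return ret;
	}
	return example_buffered_write(iocb, from);
}
#endif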