// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2021 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
#include "trace.h"

#include "../internal.h"

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	size_t			done_before;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct bio		*poll_bio;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
{
	if (dio->dops && dio->dops->bio_set)
		return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,
					GFP_KERNEL, dio->dops->bio_set);
	return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);
}

static void iomap_dio_submit_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, struct bio *bio, loff_t pos)
{
	atomic_inc(&dio->ref);

	/* Sync dio can't be polled reliably */
	if ((dio->iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(dio->iocb)) {
		bio_set_polled(bio, dio->iocb);
		dio->submit.poll_bio = bio;
	}

	if (dio->dops && dio->dops->submit_io)
		dio->dops->submit_io(iter, bio, pos);
	else
		submit_bio(bio);
}
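
/*
 * iomap_dio_complete() - complete a direct I/O request once all bios are done.
 *
 * Runs the optional ->end_io() hook, trims reads that extend past i_size,
 * invalidates the page cache over the written range, advances iocb->ki_pos,
 * and issues generic_write_sync() for O_(D)SYNC writes that could not be
 * satisfied by FUA writes alone.  Returns the number of bytes transferred
 * (including dio->done_before) or a negative errno, and frees the dio.
 */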
ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're writing.
	 * Either one is a pretty crazy thing to do, so we don't support it
	 * 100%.  If this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as some
	 * filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffer read would
	 * cache zeros from unwritten extents.
	 */
	if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE))
		kiocb_invalidate_post_direct_write(iocb, dio->size);

	inode_dio_end(file_inode(iocb->ki_filp));

	if (ret > 0) {
		iocb->ki_pos += ret;

		/*
		 * If this is a DSYNC write, make sure we push it to stable
		 * storage now that we've written data.
		 */
		if (dio->flags & IOMAP_DIO_NEED_SYNC)
			ret = generic_write_sync(iocb, ret);
		if (ret > 0)
			ret += dio->done_before;
	}
	trace_iomap_dio_complete(iocb, dio->error, ret);
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio));
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			WRITE_ONCE(dio->iocb->private, NULL);
			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			WRITE_ONCE(dio->iocb->private, NULL);
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);

static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
		loff_t pos, unsigned len)
{
	struct inode *inode = file_inode(dio->iocb->ki_filp);
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
				  GFP_KERNEL);
	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	iomap_dio_submit_bio(iter, dio, bio, pos);
}

/*
 * Figure out the bio's operation flags from the dio request, the
 * mapping, and whether or not we want FUA.  Note that we can end up
 * clearing the WRITE_FUA flag in the dio request.
 */
static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
		const struct iomap *iomap, bool use_fua)
{
	blk_opf_t opflags = REQ_SYNC | REQ_IDLE;

	if (!(dio->flags & IOMAP_DIO_WRITE))
		return REQ_OP_READ;

	opflags |= REQ_OP_WRITE;
	if (use_fua)
		opflags |= REQ_FUA;
	else
		dio->flags &= ~IOMAP_DIO_WRITE_FUA;

	return opflags;
}

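/*
 * Submit the bios for a single extent returned by ->iomap_begin().  Sub-block
 * head and tail zeroing is done here where needed, the rest of the range is
 * split into as many bios as necessary, and the number of bytes submitted or
 * a negative errno is returned.
 */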
static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iter->iomap;
	struct inode *inode = iter->inode;
	unsigned int fs_block_size = i_blocksize(inode), pad;
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	blk_opf_t bio_opf;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;
	size_t orig_count;

	if ((pos | length) & (bdev_logical_block_size(iomap->bdev) - 1) ||
	    !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device supports FUA.  This allows us to avoid
		 * cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) && bdev_fua(iomap->bdev))
			use_fua = true;
	}

	/*
	 * Save the original count and trim the iter to just the extent we
	 * are operating on right now.  The iter will be re-expanded once
	 * we are done.
	 */
	orig_count = iov_iter_count(dio->submit.iter);
	iov_iter_truncate(dio->submit.iter, length);

	if (!iov_iter_count(dio->submit.iter))
		goto out;

	/*
	 * We can only poll for single bio I/Os.
	 */
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
		dio->iocb->ki_flags &= ~IOCB_HIPRI;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos - pad, pad);
	}

	/*
	 * Set the operation flags early so that bio_iov_iter_get_pages
	 * can set up the page vector appropriately for a ZONE_APPEND
	 * operation.
	 */
	bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);

	nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			copied = ret = 0;
			goto out;
		}

		bio = iomap_dio_alloc_bio(iter, dio, nr_pages, bio_opf);
		fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
					  GFP_KERNEL);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO.  We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			task_io_account_write(n);
		} else {
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += n;
		copied += n;

		nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
						 BIO_MAX_VECS);
		/*
		 * We can only poll for single bio I/Os.
		 */
		if (nr_pages)
			dio->iocb->ki_flags &= ~IOCB_HIPRI;
		iomap_dio_submit_bio(iter, dio, bio, pos);
		pos += n;
	} while (nr_pages);

	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF.  If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
	}
out:
	/* Undo iter limitation to current extent */
	iov_iter_reexpand(dio->submit.iter, orig_count - copied);
	if (copied)
		return copied;
	return ret;
}

static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);

	dio->size += length;
	if (!length)
		return -EFAULT;
	return length;
}

static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iomi->iomap;
	struct iov_iter *iter = dio->submit.iter;
	void *inline_data = iomap_inline_data(iomap, iomi->pos);
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	size_t copied;

	if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
		return -EIO;

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = iomi->inode->i_size;

		if (pos > size)
			memset(iomap_inline_data(iomap, size), 0, pos - size);
		copied = copy_from_iter(inline_data, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(iomi->inode, pos + copied);
			mark_inode_dirty(iomi->inode);
		}
	} else {
		copied = copy_to_iter(inline_data, length, iter);
	}
	dio->size += copied;
	if (!copied)
		return -EFAULT;
	return copied;
}

static loff_t iomap_dio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	switch (iter->iomap.type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_iter(iter, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_iter(iter, dio);
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_MAPPED:
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_INLINE:
		return iomap_dio_inline_iter(iter, dio);
	case IOMAP_DELALLOC:
		/*
		 * DIO is not serialised against mmap() access at all, and so
		 * if the page_mkwrite occurs between the writeback and the
		 * iomap_iter() call in the DIO path, then it will see the
		 * DELALLOC block that the page-mkwrite allocated.
		 */
		pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
				    dio->iocb->ki_filp, current->comm);
		return -EIO;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the
 * IO is being issued as AIO or not.  This allows us to optimise pure data
 * writes to use REQ_FUA rather than requiring generic_write_sync() to issue
 * a REQ_FLUSH post write.  This is slightly tricky because a single request
 * here can be mapped into multiple disjoint IOs and only a subset of the IOs
 * issued may be pure data writes.  In that case, we still need to do a full
 * data sync completion.
 *
 * When page faults are disabled and @dio_flags includes IOMAP_DIO_PARTIAL,
 * __iomap_dio_rw can return a partial result if it encounters a non-resident
 * page in @iter after preparing a transfer.  In that case, the non-resident
 * pages can be faulted in and the request resumed with @done_before set to
 * the number of bytes previously transferred.  The request will then complete
 * with the correct total number of bytes transferred; this is essential for
 * completing partial requests asynchronously.
 *
 * Returns -ENOTBLK in case of a page invalidation failure for writes.  The
 * caller needs to fall back to buffered I/O in this case.
 */
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct iomap_iter iomi = {
		.inode		= inode,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DIRECT,
		.private	= private,
	};
	bool wait_for_completion =
		is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
	struct blk_plug plug;
	struct iomap_dio *dio;
	loff_t ret = 0;

	trace_iomap_dio_rw_begin(iocb, iter, dio_flags, done_before);

	if (!iomi.len)
		return NULL;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return ERR_PTR(-ENOMEM);

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;
	dio->done_before = done_before;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.poll_bio = NULL;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iomi.flags |= IOMAP_NOWAIT;

	if (iov_iter_rw(iter) == READ) {
		if (iomi.pos >= dio->i_size)
			goto out_free_dio;

		if (user_backed_iter(iter))
			dio->flags |= IOMAP_DIO_DIRTY;

		ret = kiocb_write_and_wait(iocb, iomi.len);
		if (ret)
			goto out_free_dio;
	} else {
		iomi.flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
			ret = -EAGAIN;
			if (iomi.pos >= dio->i_size ||
			    iomi.pos + iomi.len > dio->i_size)
				goto out_free_dio;
			iomi.flags |= IOMAP_OVERWRITE_ONLY;
		}

		/* for data sync or sync, we need sync completion processing */
		if (iocb_is_dsync(iocb)) {
			dio->flags |= IOMAP_DIO_NEED_SYNC;

			/*
			 * For datasync only writes, we optimistically try
			 * using FUA for this IO.  Any non-FUA write that
			 * occurs will clear this flag, hence we know before
			 * completion whether a cache flush is necessary.
			 */
			if (!(iocb->ki_flags & IOCB_SYNC))
				dio->flags |= IOMAP_DIO_WRITE_FUA;
		}

		/*
		 * Try to invalidate cache pages for the range we are writing.
		 * If this invalidation fails, let the caller fall back to
		 * buffered I/O.
		 */
		ret = kiocb_invalidate_pages(iocb, iomi.len);
		if (ret) {
			if (ret != -EAGAIN) {
				trace_iomap_dio_invalidate_fail(inode, iomi.pos,
								iomi.len);
				ret = -ENOTBLK;
			}
			goto out_free_dio;
		}

		if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
			ret = sb_init_dio_done_wq(inode->i_sb);
			if (ret < 0)
				goto out_free_dio;
		}
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	while ((ret = iomap_iter(&iomi, ops)) > 0) {
		iomi.processed = iomap_dio_iter(&iomi, dio);

		/*
		 * We can only poll for single bio I/Os.
		 */
		iocb->ki_flags &= ~IOCB_HIPRI;
	}

	blk_finish_plug(&plug);

	/*
	 * We only report that we've read data up to i_size.
	 * Revert iter to a state corresponding to that as some callers (such
	 * as the splice code) rely on it.
	 */
	if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
		iov_iter_revert(iter, iomi.pos - dio->i_size);

	if (ret == -EFAULT && dio->size && (dio_flags & IOMAP_DIO_PARTIAL)) {
		if (!(iocb->ki_flags & IOCB_NOWAIT))
			wait_for_completion = true;
		ret = 0;
	}

	/* magic error code to fall back to buffered I/O */
	if (ret == -ENOTBLK) {
		wait_for_completion = true;
		ret = 0;
	}
	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion.  Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->private, dio->submit.poll_bio);

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion) {
			trace_iomap_dio_rw_queued(inode, iomi.pos, iomi.len);
			return ERR_PTR(-EIOCBQUEUED);
		}

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return dio;

out_free_dio:
	kfree(dio);
	if (ret)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(__iomap_dio_rw);

ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct iomap_dio *dio;

	dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private,
			     done_before);
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);
	return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
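
/*
 * Example (illustrative sketch only, not part of this file's API): a
 * filesystem drives the code above from its ->read_iter/->write_iter methods,
 * roughly as shown below.  The example_* names are hypothetical placeholders;
 * real callers supply their own iomap_ops/iomap_dio_ops and serialise against
 * truncate and buffered I/O with their own locking.
 *
 *	static ssize_t example_dio_write(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = iomap_dio_rw(iocb, from, &example_iomap_ops,
 *				   &example_dio_ops, 0, NULL, 0);
 *		inode_unlock(inode);
 *
 *		if (ret == -ENOTBLK)
 *			ret = example_buffered_write(iocb, from);
 *		return ret;
 *	}
 *
 * Callers that pass IOMAP_DIO_PARTIAL instead fault in the missing pages on
 * -EFAULT and retry with done_before set to the bytes already transferred,
 * as described in the comment above __iomap_dio_rw().
 */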