// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

#define IOEND_BATCH_SIZE	4096

/*
 * Structure allocated for each folio when block size < folio size
 * to track sub-folio uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];
};

static inline struct iomap_page *to_iomap_page(struct folio *folio)
{
	if (folio_test_private(folio))
		return folio_get_private(folio);
	return NULL;
}

static struct bio_set iomap_ioend_bioset;

static struct iomap_page *
iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags)
{
	struct iomap_page *iop = to_iomap_page(folio);
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
	gfp_t gfp;

	if (iop || nr_blocks <= 1)
		return iop;

	if (flags & IOMAP_NOWAIT)
		gfp = GFP_NOWAIT;
	else
		gfp = GFP_NOFS | __GFP_NOFAIL;

	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
		      gfp);
	if (iop) {
		spin_lock_init(&iop->uptodate_lock);
		if (folio_test_uptodate(folio))
			bitmap_fill(iop->uptodate, nr_blocks);
		folio_attach_private(folio, iop);
	}
	return iop;
}

static void iomap_page_release(struct folio *folio)
{
	struct iomap_page *iop = folio_detach_private(folio);
	struct inode *inode = folio->mapping->host;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			folio_test_uptodate(folio));
	kfree(iop);
}
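/*
 * Worked example (illustrative): with 1kB blocks in a 4kB folio,
 * i_blocks_per_folio() is 4, so iop->uptodate needs 4 bits.  A read that
 * fills bytes 0-2047 sets bits 0 and 1; the folio itself is only marked
 * uptodate once all 4 bits are set.
 */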
/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_page *iop = to_iomap_page(folio);
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

static void iomap_iop_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned long flags;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, last - first + 1);
	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void iomap_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	if (folio_test_error(folio))
		return;

	if (iop)
		iomap_iop_set_range_uptodate(folio, iop, off, len);
	else
		folio_mark_uptodate(folio);
}

static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (unlikely(error)) {
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		iomap_set_range_uptodate(folio, iop, offset, len);
	}

	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
		folio_unlock(folio);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}
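/*
 * State shared across iomap_readpage_iter() calls: the folio currently
 * being filled, whether any of its blocks were added to the in-flight bio
 * (and hence whether bio completion will unlock it), the bio under
 * construction, and the readahead state (NULL for a plain read_folio).
 */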
struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	struct iomap_page *iop;
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	size_t offset = offset_in_folio(folio, iomap->offset);
	void *addr;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			 offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		iop = iomap_page_create(iter->inode, folio, iter->flags);
	else
		iop = to_iomap_page(folio);

	addr = kmap_local_folio(folio, offset);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
	return 0;
}

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct folio *folio = ctx->cur_folio;
	struct iomap_page *iop;
	loff_t orig_pos = pos;
	size_t poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, folio);

	/* zero post-eof blocks as the page may be mapped */
	iop = iomap_page_create(iter->inode, folio, iter->flags);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, iop, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (iop)
		atomic_add(plen, &iop->read_bytes_pending);

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
				     REQ_OP_READ, gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!ctx->bio) {
			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
					     orig_gfp);
		}
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}
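/*
 * Read the folio at the iterator's current position using the filesystem's
 * iomap_ops to map extents.  A filesystem would typically wire this up as
 * its ->read_folio method, e.g. (illustrative sketch; "myfs" names are
 * placeholders):
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return iomap_read_folio(folio, &myfs_iomap_ops);
 *	}
 */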
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode = folio->mapping->host,
		.pos = folio_pos(folio),
		.len = folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio = folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		folio_set_error(folio);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_folio, we always
	 * return 0 and just set the folio error flag on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_read_folio);

static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode = rac->mapping->host,
		.pos = readahead_pos(rac),
		.len = readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac = rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);
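/*
 * As with iomap_read_folio(), the typical caller of iomap_readahead() is a
 * filesystem's ->readahead method, e.g. (illustrative sketch; "myfs" names
 * are placeholders):
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		iomap_readahead(rac, &myfs_iomap_ops);
 *	}
 */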
/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_page *iop = to_iomap_page(folio);
	struct inode *inode = folio->mapping->host;
	unsigned first, last, i;

	if (!iop)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	for (i = first; i <= last; i++)
		if (!test_bit(i, iop->uptodate))
			return false;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list();
	 * skip those here.
	 */
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return false;
	iomap_page_release(folio);
	return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
			folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		iomap_page_release(folio);
	} else if (folio_test_large(folio)) {
		/* Must release the iop so the page can be split */
		WARN_ON_ONCE(!folio_test_uptodate(folio) &&
			     folio_test_dirty(folio));
		iomap_page_release(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	struct folio *folio = page_folio(page);
	struct folio *newfolio = page_folio(newpage);
	int ret;

	ret = folio_migrate_mapping(mapping, newfolio, folio, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_test_private(folio))
		folio_attach_private(newfolio, folio_detach_private(folio));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(newfolio, folio);
	else
		folio_migrate_flags(newfolio, folio);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
					 pos + len - 1);
}

static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_add_folio(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}
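/*
 * Bring the blocks touched by a write of @len bytes at @pos into a
 * consistent state before the copy-in: blocks the write only partially
 * covers and that are not yet uptodate are either zeroed (for fresh or
 * unwritten extents) or read in synchronously, so a short copy cannot
 * expose stale data.  Fully overwritten blocks are left alone.
 */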
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_page *iop;
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	if (folio_test_uptodate(folio))
		return 0;
	folio_clear_error(folio);

	iop = iomap_page_create(iter->inode, folio, iter->flags);

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, iop, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(iter->inode, pos, len);
		if (status)
			return status;
	}

	folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
	if (!folio) {
		status = -ENOMEM;
		goto out_no_page;
	}
	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	folio_unlock(folio);
	folio_put(folio);
	iomap_write_failed(iter->inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, 0, NULL);
	return status;
}

static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return 0;
	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
	filemap_dirty_folio(inode->i_mapping, folio);
	return copied;
}

static size_t iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, folio, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, &folio->page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	folio_unlock(folio);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, ret, &folio->page);
	folio_put(folio);

	if (ret < len)
		iomap_write_failed(iter->inode, pos + ret, len - ret);
	return ret;
}
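/*
 * Copy data from @i into the page cache, one folio at a time: fault the
 * source pages in first, then write_begin/copy/write_end, shortening the
 * chunk and retrying whenever iomap_write_end() rejects a short copy.
 */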
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;

	do {
		struct folio *folio;
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
				iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			break;

		page = folio_file_page(folio, pos >> PAGE_SHIFT);
		if (mapping_writably_mapped(iter->inode->i_mapping))
			flush_dcache_page(page);

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);

		status = iomap_write_end(iter, pos, bytes, copied, folio);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			goto again;
		}
		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}
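/*
 * Entry point for buffered writes.  Typically called from a filesystem's
 * ->write_iter method with the inode lock held, e.g. (illustrative sketch;
 * "myfs" is a placeholder):
 *
 *	ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
 *
 * Note that the caller is responsible for advancing iocb->ki_pos and for
 * any post-write syncing.
 */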
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode = iocb->ki_filp->f_mapping->host,
		.pos = iocb->ki_pos,
		.len = iov_iter_count(i),
		.flags = IOMAP_WRITE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);
	if (iter.pos == iocb->ki_pos)
		return ret;
	return iter.pos - iocb->ki_pos;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	long status = 0;
	loff_t written = 0;

	/* don't bother with blocks that are not shared to start with */
	if (!(iomap->flags & IOMAP_F_SHARED))
		return length;
	/* don't bother with holes or unwritten extents */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned long offset = offset_in_page(pos);
		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
		struct folio *folio;

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			return status;

		status = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(status == 0))
			return -EIO;

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode = inode,
		.pos = pos,
		.len = len,
		.flags = IOMAP_WRITE | IOMAP_UNSHARE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
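/*
 * Zero a range by writing zeroes through the page cache.  Holes and
 * unwritten extents already read back as zeroes, so they are skipped.
 */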
static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		struct folio *folio;
		int status;
		size_t offset;
		size_t bytes = min_t(u64, SIZE_MAX, length);

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (status)
			return status;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		folio_zero_range(folio, offset, bytes);
		folio_mark_accessed(folio);

		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(bytes == 0))
			return -EIO;

		pos += bytes;
		length -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (length > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode = inode,
		.pos = pos,
		.len = len,
		.flags = IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary?  Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
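/*
 * Example (illustrative): with a 4096-byte block size, a truncate to
 * pos == 6144 leaves off == 2048, so the remaining 2048 bytes of that
 * block (file offsets 6144-8191) are zeroed; a truncate to a
 * block-aligned position is a no-op here.
 */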
static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
		struct folio *folio)
{
	loff_t length = iomap_length(iter);
	int ret;

	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
					      &iter->iomap);
		if (ret)
			return ret;
		block_commit_write(&folio->page, 0, length);
	} else {
		WARN_ON_ONCE(!folio_test_uptodate(folio));
		folio_mark_dirty(folio);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode = file_inode(vmf->vma->vm_file),
		.flags = IOMAP_WRITE | IOMAP_FAULT,
	};
	struct folio *folio = page_folio(vmf->page);
	ssize_t ret;

	folio_lock(folio);
	ret = folio_mkwrite_check_truncate(folio, iter.inode);
	if (ret < 0)
		goto out_unlock;
	iter.pos = folio_pos(folio);
	iter.len = ret;
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_folio_mkwrite_iter(&iter, folio);

	if (ret < 0)
		goto out_unlock;
	folio_wait_stable(folio);
	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
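/*
 * A filesystem typically calls iomap_page_mkwrite() from its
 * vm_operations_struct ->page_mkwrite handler, e.g. (illustrative sketch;
 * "myfs" names are placeholders):
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *	}
 */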
static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (error) {
		folio_set_error(folio);
		mapping_set_error(inode->i_mapping, error);
	}

	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);

	if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
		folio_end_writeback(folio);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static u32
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_inline_bio;
	struct bio *last = ioend->io_bio, *next;
	u64 start = bio->bi_iter.bi_sector;
	loff_t offset = ioend->io_offset;
	bool quiet = bio_flagged(bio, BIO_QUIET);
	u32 folio_count = 0;

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct folio_iter fi;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk all folios in bio, ending page IO on them */
		bio_for_each_folio_all(fi, bio) {
			iomap_finish_folio_write(inode, fi.folio, fi.length,
					error);
			folio_count++;
		}
		bio_put(bio);
	}
	/* The ioend has been freed by bio_put() */

	if (unlikely(error && !quiet)) {
		printk_ratelimited(KERN_ERR
			"%s: writeback error on inode %lu, offset %lld, sector %llu",
			inode->i_sb->s_id, inode->i_ino, offset, start);
	}
	return folio_count;
}

/*
 * Ioend completion routine for merged bios.  This can only be called from
 * task contexts as merged ioends can be of unbound length.  Hence we have to
 * break up the writeback completions into manageable chunks to avoid long
 * scheduler holdoffs.  We aim to keep scheduler holdoffs down below 10ms so
 * that we get good batch processing throughput without creating adverse
 * scheduler latency conditions.
 */
void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
	struct list_head tmp;
	u32 completions;

	might_sleep();

	list_replace_init(&ioend->io_list, &tmp);
	completions = iomap_finish_ioend(ioend, error);

	while (!list_empty(&tmp)) {
		if (completions > IOEND_BATCH_SIZE * 8) {
			cond_resched();
			completions = 0;
		}
		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
		list_del_init(&ioend->io_list);
		completions += iomap_finish_ioend(ioend, error);
	}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);

/*
 * We can merge two adjacent ioends if they have the same set of work to do.
 */
static bool
iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
{
	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
		return false;
	if ((ioend->io_flags & IOMAP_F_SHARED) ^
	    (next->io_flags & IOMAP_F_SHARED))
		return false;
	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
	    (next->io_type == IOMAP_UNWRITTEN))
		return false;
	if (ioend->io_offset + ioend->io_size != next->io_offset)
		return false;
	/*
	 * Do not merge physically discontiguous ioends.  The filesystem
	 * completion functions will have to iterate the physical
	 * discontiguities even if we merge the ioends at a logical level, so
	 * we don't gain anything by merging physical discontiguities here.
	 *
	 * We cannot use bio->bi_iter.bi_sector here as it is modified during
	 * submission so does not point to the start sector of the bio at
	 * completion.
	 */
	if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
		return false;
	return true;
}

void
iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
{
	struct iomap_ioend *next;

	INIT_LIST_HEAD(&ioend->io_list);

	while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
			io_list))) {
		if (!iomap_ioend_can_merge(ioend, next))
			break;
		list_move_tail(&next->io_list, &ioend->io_list);
		ioend->io_size += next->io_size;
	}
}
EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);

static int
iomap_ioend_compare(void *priv, const struct list_head *a,
		const struct list_head *b)
{
	struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
	struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);

	if (ia->io_offset < ib->io_offset)
		return -1;
	if (ia->io_offset > ib->io_offset)
		return 1;
	return 0;
}

void
iomap_sort_ioends(struct list_head *ioend_list)
{
	list_sort(NULL, ioend_list, iomap_ioend_compare);
}
EXPORT_SYMBOL_GPL(iomap_sort_ioends);
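/*
 * A typical completion-side pattern (illustrative): the filesystem gathers
 * completed ioends on a local list, calls iomap_sort_ioends() to order them
 * by file offset, then pops the first entry, merges what it can into it
 * with iomap_ioend_try_merge(), and finishes the batch with
 * iomap_finish_ioends().
 */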
static void iomap_writepage_end_bio(struct bio *bio)
{
	struct iomap_ioend *ioend = bio->bi_private;

	iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

/*
 * Submit the final bio for an ioend.
 *
 * If @error is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we've marked pages for writeback
 * and unlocked them.  In this situation, we need to fail the bio instead of
 * submitting it.  This typically only happens on a filesystem shutdown.
 */
static int
iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
		int error)
{
	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = iomap_writepage_end_bio;

	if (wpc->ops->prepare_ioend)
		error = wpc->ops->prepare_ioend(ioend, error);
	if (error) {
		/*
		 * If we're failing the IO now, just mark the ioend with an
		 * error and finish it.  This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		ioend->io_bio->bi_status = errno_to_blk_status(error);
		bio_endio(ioend->io_bio);
		return error;
	}

	submit_bio(ioend->io_bio);
	return 0;
}

static struct iomap_ioend *
iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
		loff_t offset, sector_t sector, struct writeback_control *wbc)
{
	struct iomap_ioend *ioend;
	struct bio *bio;

	bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
			       REQ_OP_WRITE | wbc_to_write_flags(wbc),
			       GFP_NOFS, &iomap_ioend_bioset);
	bio->bi_iter.bi_sector = sector;
	wbc_init_bio(wbc, bio);

	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = wpc->iomap.type;
	ioend->io_flags = wpc->iomap.flags;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_folios = 0;
	ioend->io_offset = offset;
	ioend->io_bio = bio;
	ioend->io_sector = sector;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in iomap_finish_ioend().
 */
static struct bio *
iomap_chain_bio(struct bio *prev)
{
	struct bio *new;

	new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
	bio_clone_blkg_association(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);

	bio_chain(prev, new);
	bio_get(prev);		/* for iomap_finish_ioend */
	submit_bio(prev);
	return new;
}

static bool
iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
		sector_t sector)
{
	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
		return false;
	if (wpc->iomap.type != wpc->ioend->io_type)
		return false;
	if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
		return false;
	if (sector != bio_end_sector(wpc->ioend->io_bio))
		return false;
	/*
	 * Limit ioend bio chain lengths to minimise IO completion latency.
	 * This also prevents long tight loops ending page writeback on all
	 * the folios in the ioend.
	 */
	if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
		return false;
	return true;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 */
static void
iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
		struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct list_head *iolist)
{
	sector_t sector = iomap_sector(&wpc->iomap, pos);
	unsigned len = i_blocksize(inode);
	size_t poff = offset_in_folio(folio, pos);

	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
	}

	if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
		wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
		bio_add_folio(wpc->ioend->io_bio, folio, len, poff);
	}

	if (iop)
		atomic_add(len, &iop->write_bytes_pending);
	wpc->ioend->io_size += len;
	wbc_account_cgroup_owner(wbc, &folio->page, len);
}
/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * the forward progress guarantees we need to provide.  The current ioend
 * we're adding blocks to is cached in the writepage context, and if the new
 * block doesn't append to the cached ioend, it will create a new ioend and
 * cache that instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has
 * been detected.  While ioends are submitted immediately after they are
 * completed, batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on
 * the writepage context that the caller will need to submit.
 */
static int
iomap_writepage_map(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct inode *inode,
		struct folio *folio, u64 end_pos)
{
	struct iomap_page *iop = iomap_page_create(inode, folio, 0);
	struct iomap_ioend *ioend, *next;
	unsigned len = i_blocksize(inode);
	unsigned nblocks = i_blocks_per_folio(inode, folio);
	u64 pos = folio_pos(folio);
	int error = 0, count = 0, i;
	LIST_HEAD(submit_list);

	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);

	/*
	 * Walk through the folio to find areas to write back.  If we
	 * run off the end of the current map or find the current map
	 * invalid, grab a new one.
	 */
	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = wpc->ops->map_blocks(wpc, inode, pos);
		if (error)
			break;
		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
			continue;
		if (wpc->iomap.type == IOMAP_HOLE)
			continue;
		iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
				&submit_list);
		count++;
	}
	if (count)
		wpc->ioend->io_folios++;

	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
	WARN_ON_ONCE(!folio_test_locked(folio));
	WARN_ON_ONCE(folio_test_writeback(folio));
	WARN_ON_ONCE(folio_test_dirty(folio));

	/*
	 * We cannot cancel the ioend directly here on error.  We may have
	 * already set other pages under writeback and hence we have to run
	 * I/O completion to mark the error state of the pages under writeback
	 * appropriately.
	 */
	if (unlikely(error)) {
		/*
		 * Let the filesystem know what portion of the current page
		 * failed to map.  If the page hasn't been added to ioend, it
		 * won't be affected by I/O completion and we must unlock it
		 * now.
		 */
		if (wpc->ops->discard_folio)
			wpc->ops->discard_folio(folio, pos);
		if (!count) {
			folio_unlock(folio);
			goto done;
		}
	}

	folio_start_writeback(folio);
	folio_unlock(folio);

	/*
	 * Preserve the original error if there was one; catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = iomap_submit_ioend(wpc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we
	 * race with a partial page truncate on a sub-page block sized
	 * filesystem.
	 */
	if (!count)
		folio_end_writeback(folio);
done:
	mapping_set_error(folio->mapping, error);
	return error;
}
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page, we need to allocate space and flush it.
 * For unwritten space on the page, we need to start the conversion to
 * regular allocated space.
 */
static int
iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
{
	struct folio *folio = page_folio(page);
	struct iomap_writepage_ctx *wpc = data;
	struct inode *inode = folio->mapping->host;
	u64 end_pos, isize;

	trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));

	/*
	 * Refuse to write the folio out if we're called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Is this folio beyond the end of the file?
	 *
	 * The folio index is less than the end_index, adjust the end_pos
	 * to the highest offset that this folio should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	isize = i_size_read(inode);
	end_pos = folio_pos(folio) + folio_size(folio);
	if (end_pos > isize) {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    | Straddles  |
		 * ---------------------------------^-----------|--------|
		 */
		size_t poff = offset_in_folio(folio, isize);
		pgoff_t end_index = isize >> PAGE_SHIFT;

		/*
		 * Skip the page if it's fully outside i_size, e.g. due to a
		 * truncate operation that's in progress.  We must redirty the
		 * page so that reclaim stops reclaiming it.  Otherwise
		 * iomap_release_folio() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long.  If the given
		 * offset is greater than 16TB on a 32-bit system then if we
		 * checked if the page is fully outside i_size with
		 * "if (page->index >= end_index + 1)", "end_index + 1" would
		 * overflow and evaluate to 0.  Hence this page would be
		 * redirtied and written out repeatedly, which would result in
		 * an infinite loop; the user program performing this operation
		 * would hang.  Instead, we can detect this situation by
		 * checking if the page is totally beyond i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (folio->index > end_index ||
		    (folio->index == end_index && poff == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		folio_zero_segment(folio, poff, folio_size(folio));
		end_pos = isize;
	}

	return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);

redirty:
	folio_redirty_for_writepage(wbc, folio);
	folio_unlock(folio);
	return 0;
}
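/*
 * iomap_writepage() and iomap_writepages() below are the entry points a
 * filesystem wires into its ->writepage/->writepages methods, passing a
 * writepage context that carries its writeback ops, e.g. (illustrative
 * sketch; "myfs" names are placeholders):
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc,
 *				&myfs_writeback_ops);
 *	}
 */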
int
iomap_writepage(struct page *page, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = iomap_do_writepage(page, wbc, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepage);

int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepages);

static int __init iomap_init(void)
{
	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
			   offsetof(struct iomap_ioend, io_inline_bio),
			   BIOSET_NEED_BVECS);
}
fs_initcall(iomap_init);