// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

#define IOEND_BATCH_SIZE	4096

/*
 * Structure allocated for each folio when block size < folio size
 * to track sub-folio uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];
};

static inline struct iomap_page *to_iomap_page(struct folio *folio)
{
	if (folio_test_private(folio))
		return folio_get_private(folio);
	return NULL;
}

static struct bio_set iomap_ioend_bioset;

static struct iomap_page *
iomap_page_create(struct inode *inode, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (iop || nr_blocks <= 1)
		return iop;

	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
			GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&iop->uptodate_lock);
	if (folio_test_uptodate(folio))
		bitmap_fill(iop->uptodate, nr_blocks);
	folio_attach_private(folio, iop);
	return iop;
}

static void iomap_page_release(struct folio *folio)
{
	struct iomap_page *iop = folio_detach_private(folio);
	struct inode *inode = folio->mapping->host;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			folio_test_uptodate(folio));
	kfree(iop);
}
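
/*
 * Illustrative sketch (not used by the code below): how the per-block
 * uptodate bitmap above maps a byte range of a folio onto block indices.
 * This mirrors the first/last arithmetic used throughout this file, e.g.
 * in iomap_adjust_read_range() and iomap_is_partially_uptodate().
 */
static bool __maybe_unused iomap_example_range_uptodate(struct inode *inode,
		struct folio *folio, size_t off, size_t len)
{
	struct iomap_page *iop = to_iomap_page(folio);
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned i;

	/* without an iop there is only folio-granularity state */
	if (!iop)
		return folio_test_uptodate(folio);
	for (i = first; i <= last; i++)
		if (!test_bit(i, iop->uptodate))
			return false;
	return true;
}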

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_page *iop = to_iomap_page(folio);
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

static void iomap_iop_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned long flags;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, last - first + 1);
	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void iomap_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	if (folio_test_error(folio))
		return;

	if (iop)
		iomap_iop_set_range_uptodate(folio, iop, off, len);
	else
		folio_mark_uptodate(folio);
}

static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (unlikely(error)) {
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		iomap_set_range_uptodate(folio, iop, offset, len);
	}

	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
		folio_unlock(folio);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	struct iomap_page *iop;
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	size_t offset = offset_in_folio(folio, iomap->offset);
	void *addr;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			 offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		iop = iomap_page_create(iter->inode, folio);
	else
		iop = to_iomap_page(folio);

	addr = kmap_local_folio(folio, offset);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
	return 0;
}

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct folio *folio = ctx->cur_folio;
	struct iomap_page *iop;
	loff_t orig_pos = pos;
	size_t poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, folio);

	/* zero post-eof blocks as the page may be mapped */
	iop = iomap_page_create(iter->inode, folio);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, iop, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (iop)
		atomic_add(plen, &iop->read_bytes_pending);

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_readpage does.
		 */
		if (!ctx->bio)
			ctx->bio = bio_alloc(orig_gfp, 1);
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct folio *folio = page_folio(page);
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		folio_set_error(folio);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_page, we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);

static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);
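
/*
 * Illustrative sketch of how a filesystem wires the read paths above into
 * its address_space_operations.  "example_iomap_ops" is a hypothetical
 * iomap_ops instance; a real filesystem supplies its own ->iomap_begin
 * (compare xfs_vm_readpage and friends).  Not built; illustration only.
 */
#if 0
static int example_readpage(struct file *unused, struct page *page)
{
	return iomap_readpage(page, &example_iomap_ops);
}

static void example_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &example_iomap_ops);
}

static const struct address_space_operations example_aops = {
	.readpage	= example_readpage,
	.readahead	= example_readahead,
};
#endif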

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct folio *folio = page_folio(page);

	trace_iomap_releasepage(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return 0;
	iomap_page_release(folio);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidatepage(folio->mapping->host, offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		iomap_page_release(folio);
	} else if (folio_test_large(folio)) {
		/* Must release the iop so the page can be split */
		WARN_ON_ONCE(!folio_test_uptodate(folio) &&
			     folio_test_dirty(folio));
		iomap_page_release(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

void iomap_invalidatepage(struct page *page, unsigned int offset,
		unsigned int len)
{
	iomap_invalidate_folio(page_folio(page), offset, len);
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	struct folio *folio = page_folio(page);
	struct folio *newfolio = page_folio(newpage);
	int ret;

	ret = folio_migrate_mapping(mapping, newfolio, folio, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_test_private(folio))
		folio_attach_private(newfolio, folio_detach_private(folio));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(newfolio, folio);
	else
		folio_migrate_flags(newfolio, folio);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */
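
/*
 * The helpers above are designed to be plugged straight into a
 * filesystem's address_space_operations; a sketch, assuming the field
 * names of this kernel's struct address_space_operations (callers such
 * as XFS do exactly this):
 *
 *	.releasepage		= iomap_releasepage,
 *	.invalidatepage		= iomap_invalidatepage,
 *	.migratepage		= iomap_migrate_page,
 *	.is_partially_uptodate	= iomap_is_partially_uptodate,
 */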

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	bio_add_folio(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}

static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_page *iop = iomap_page_create(iter->inode, folio);
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	if (folio_test_uptodate(folio))
		return 0;
	folio_clear_error(folio);

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, iop, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(iter->inode, pos, len);
		if (status)
			return status;
	}

	folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
	if (!folio) {
		status = -ENOMEM;
		goto out_no_page;
	}
	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	folio_unlock(folio);
	folio_put(folio);
	iomap_write_failed(iter->inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, 0, NULL);
	return status;
}

static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return 0;
	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
	filemap_dirty_folio(inode->i_mapping, folio);
	return copied;
}

static size_t iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, folio, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, &folio->page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	folio_unlock(folio);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, ret, &folio->page);
	folio_put(folio);

	if (ret < len)
		iomap_write_failed(iter->inode, pos, len);
	return ret;
}

static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;

	do {
		struct folio *folio;
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			break;

		page = folio_file_page(folio, pos >> PAGE_SHIFT);
		if (mapping_writably_mapped(iter->inode->i_mapping))
			flush_dcache_page(page);

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);

		status = iomap_write_end(iter, pos, bytes, copied, folio);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			goto again;
		}
		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}
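
/*
 * Illustrative sketch of a ->write_iter built on
 * iomap_file_buffered_write() below.  "example_iomap_ops" is hypothetical,
 * and locking details, O_DIRECT and syncing are omitted; compare
 * xfs_file_buffered_write() for a complete caller.  Not built.
 */
#if 0
static ssize_t example_file_write_iter(struct kiocb *iocb,
		struct iov_iter *from)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &example_iomap_ops);
	inode_unlock(inode);
	return ret;
}
#endif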

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);
	if (iter.pos == iocb->ki_pos)
		return ret;
	return iter.pos - iocb->ki_pos;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	long status = 0;
	loff_t written = 0;

	/* don't bother with blocks that are not shared to start with */
	if (!(iomap->flags & IOMAP_F_SHARED))
		return length;
	/* don't bother with holes or unwritten extents */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned long offset = offset_in_page(pos);
		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
		struct folio *folio;

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			return status;

		status = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(status == 0))
			return -EIO;

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
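
/*
 * Typical caller sketch: a reflink-capable filesystem breaks sharing ahead
 * of writes that must not allocate at fault time, roughly (hypothetical
 * ops name; compare xfs_reflink_unshare() for a real example):
 *
 *	error = iomap_file_unshare(inode, offset, len, &example_iomap_ops);
 */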

static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		struct folio *folio;
		int status;
		size_t offset;
		size_t bytes = min_t(u64, SIZE_MAX, length);

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (status)
			return status;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		folio_zero_range(folio, offset, bytes);
		folio_mark_accessed(folio);

		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(bytes == 0))
			return -EIO;

		pos += bytes;
		length -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (length > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
		struct folio *folio)
{
	loff_t length = iomap_length(iter);
	int ret;

	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
					      &iter->iomap);
		if (ret)
			return ret;
		block_commit_write(&folio->page, 0, length);
	} else {
		WARN_ON_ONCE(!folio_test_uptodate(folio));
		folio_mark_dirty(folio);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= file_inode(vmf->vma->vm_file),
		.flags		= IOMAP_WRITE | IOMAP_FAULT,
	};
	struct folio *folio = page_folio(vmf->page);
	ssize_t ret;

	folio_lock(folio);
	ret = folio_mkwrite_check_truncate(folio, iter.inode);
	if (ret < 0)
		goto out_unlock;
	iter.pos = folio_pos(folio);
	iter.len = ret;
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_folio_mkwrite_iter(&iter, folio);

	if (ret < 0)
		goto out_unlock;
	folio_wait_stable(folio);
	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
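
/*
 * Illustrative sketch of plugging iomap_page_mkwrite() into a
 * vm_operations_struct (hypothetical ops; a real caller also takes
 * whatever locks its ->iomap_begin expects, cf. xfs_filemap_page_mkwrite).
 * Not built.
 */
#if 0
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	return iomap_page_mkwrite(vmf, &example_iomap_ops);
}

static const struct vm_operations_struct example_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= example_page_mkwrite,
};
#endif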

static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (error) {
		folio_set_error(folio);
		mapping_set_error(inode->i_mapping, error);
	}

	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);

	if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
		folio_end_writeback(folio);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static u32
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_inline_bio;
	struct bio *last = ioend->io_bio, *next;
	u64 start = bio->bi_iter.bi_sector;
	loff_t offset = ioend->io_offset;
	bool quiet = bio_flagged(bio, BIO_QUIET);
	u32 folio_count = 0;

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct folio_iter fi;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk all folios in bio, ending page IO on them */
		bio_for_each_folio_all(fi, bio) {
			iomap_finish_folio_write(inode, fi.folio, fi.length,
					error);
			folio_count++;
		}
		bio_put(bio);
	}
	/* The ioend has been freed by bio_put() */

	if (unlikely(error && !quiet)) {
		printk_ratelimited(KERN_ERR
"%s: writeback error on inode %lu, offset %lld, sector %llu",
			inode->i_sb->s_id, inode->i_ino, offset, start);
	}
	return folio_count;
}

/*
 * Ioend completion routine for merged bios.  This can only be called from
 * task contexts as merged ioends can be of unbound length.  Hence we have to
 * break up the writeback completions into manageable chunks to avoid long
 * scheduler holdoffs.  We aim to keep scheduler holdoffs down below 10ms so
 * that we get good batch processing throughput without creating adverse
 * scheduler latency conditions.
 */
void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
	struct list_head tmp;
	u32 completions;

	might_sleep();

	list_replace_init(&ioend->io_list, &tmp);
	completions = iomap_finish_ioend(ioend, error);

	while (!list_empty(&tmp)) {
		if (completions > IOEND_BATCH_SIZE * 8) {
			cond_resched();
			completions = 0;
		}
		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
		list_del_init(&ioend->io_list);
		completions += iomap_finish_ioend(ioend, error);
	}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);
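
/*
 * Completion-side usage sketch: a filesystem that defers ioend completion
 * to a workqueue typically sorts the pending ioends, merges what it can
 * with the helpers below, and then finishes each merged batch (hypothetical
 * list/error variables; compare xfs_end_io()):
 *
 *	iomap_sort_ioends(&completion_list);
 *	while ((ioend = list_first_entry_or_null(&completion_list,
 *			struct iomap_ioend, io_list))) {
 *		list_del_init(&ioend->io_list);
 *		iomap_ioend_try_merge(ioend, &completion_list);
 *		iomap_finish_ioends(ioend, error);
 *	}
 */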

/*
 * We can merge two adjacent ioends if they have the same set of work to do.
 */
static bool
iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
{
	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
		return false;
	if ((ioend->io_flags & IOMAP_F_SHARED) ^
	    (next->io_flags & IOMAP_F_SHARED))
		return false;
	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
	    (next->io_type == IOMAP_UNWRITTEN))
		return false;
	if (ioend->io_offset + ioend->io_size != next->io_offset)
		return false;
	/*
	 * Do not merge physically discontiguous ioends.  The filesystem
	 * completion functions will have to iterate the physical
	 * discontiguities even if we merge the ioends at a logical level, so
	 * we don't gain anything by merging physical discontiguities here.
	 *
	 * We cannot use bio->bi_iter.bi_sector here as it is modified during
	 * submission so does not point to the start sector of the bio at
	 * completion.
	 */
	if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
		return false;
	return true;
}

void
iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
{
	struct iomap_ioend *next;

	INIT_LIST_HEAD(&ioend->io_list);

	while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
			io_list))) {
		if (!iomap_ioend_can_merge(ioend, next))
			break;
		list_move_tail(&next->io_list, &ioend->io_list);
		ioend->io_size += next->io_size;
	}
}
EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);

static int
iomap_ioend_compare(void *priv, const struct list_head *a,
		const struct list_head *b)
{
	struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
	struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);

	if (ia->io_offset < ib->io_offset)
		return -1;
	if (ia->io_offset > ib->io_offset)
		return 1;
	return 0;
}

void
iomap_sort_ioends(struct list_head *ioend_list)
{
	list_sort(NULL, ioend_list, iomap_ioend_compare);
}
EXPORT_SYMBOL_GPL(iomap_sort_ioends);

static void iomap_writepage_end_bio(struct bio *bio)
{
	struct iomap_ioend *ioend = bio->bi_private;

	iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

/*
 * Submit the final bio for an ioend.
 *
 * If @error is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we've marked pages for writeback
 * and unlocked them.  In this situation, we need to fail the bio instead of
 * submitting it.  This typically only happens on a filesystem shutdown.
 */
static int
iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
		int error)
{
	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = iomap_writepage_end_bio;

	if (wpc->ops->prepare_ioend)
		error = wpc->ops->prepare_ioend(ioend, error);
	if (error) {
		/*
		 * If we're failing the IO now, just mark the ioend with an
		 * error and finish it.  This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		ioend->io_bio->bi_status = errno_to_blk_status(error);
		bio_endio(ioend->io_bio);
		return error;
	}

	submit_bio(ioend->io_bio);
	return 0;
}

static struct iomap_ioend *
iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
		loff_t offset, sector_t sector, struct writeback_control *wbc)
{
	struct iomap_ioend *ioend;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset);
	bio_set_dev(bio, wpc->iomap.bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	bio->bi_write_hint = inode->i_write_hint;
	wbc_init_bio(wbc, bio);

	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = wpc->iomap.type;
	ioend->io_flags = wpc->iomap.flags;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_folios = 0;
	ioend->io_offset = offset;
	ioend->io_bio = bio;
	ioend->io_sector = sector;
	return ioend;
}
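
/*
 * Note on the allocation above: iomap_ioend_bioset is initialized (see
 * iomap_init() at the bottom of this file) with a front pad of
 * offsetof(struct iomap_ioend, io_inline_bio), so every bio allocated from
 * it sits at the tail of an ioend.  That is what makes the container_of()
 * in iomap_alloc_ioend() and iomap_writepage_end_bio() valid without a
 * separate ioend allocation.
 */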

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in iomap_finish_ioend().
 */
static struct bio *
iomap_chain_bio(struct bio *prev)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
	bio_copy_dev(new, prev);/* also copies over blkcg information */
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;

	bio_chain(prev, new);
	bio_get(prev);		/* for iomap_finish_ioend */
	submit_bio(prev);
	return new;
}

static bool
iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
		sector_t sector)
{
	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
		return false;
	if (wpc->iomap.type != wpc->ioend->io_type)
		return false;
	if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
		return false;
	if (sector != bio_end_sector(wpc->ioend->io_bio))
		return false;
	/*
	 * Limit ioend bio chain lengths to minimise IO completion latency.
	 * This also prevents long tight loops ending page writeback on all
	 * the folios in the ioend.
	 */
	if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
		return false;
	return true;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 */
static void
iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
		struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct list_head *iolist)
{
	sector_t sector = iomap_sector(&wpc->iomap, pos);
	unsigned len = i_blocksize(inode);
	size_t poff = offset_in_folio(folio, pos);

	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
	}

	if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
		wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
		bio_add_folio(wpc->ioend->io_bio, folio, len, poff);
	}

	if (iop)
		atomic_add(len, &iop->write_bytes_pending);
	wpc->ioend->io_size += len;
	wbc_account_cgroup_owner(wbc, &folio->page, len);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * the forward progress guarantees we need to provide.  The current ioend
 * we're adding blocks to is cached in the writepage context, and if the new
 * block doesn't append to the cached ioend, it will create a new ioend and
 * cache that instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has
 * been detected.  While ioends are submitted immediately after they are
 * completed, batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on
 * the writepage context that the caller will need to submit.
 */
static int
iomap_writepage_map(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct inode *inode,
		struct folio *folio, u64 end_pos)
{
	struct iomap_page *iop = iomap_page_create(inode, folio);
	struct iomap_ioend *ioend, *next;
	unsigned len = i_blocksize(inode);
	unsigned nblocks = i_blocks_per_folio(inode, folio);
	u64 pos = folio_pos(folio);
	int error = 0, count = 0, i;
	LIST_HEAD(submit_list);

	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);

	/*
	 * Walk through the folio to find areas to write back. If we
	 * run off the end of the current map or find the current map
	 * invalid, grab a new one.
	 */
	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = wpc->ops->map_blocks(wpc, inode, pos);
		if (error)
			break;
		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
			continue;
		if (wpc->iomap.type == IOMAP_HOLE)
			continue;
		iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
				 &submit_list);
		count++;
	}
	if (count)
		wpc->ioend->io_folios++;

	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
	WARN_ON_ONCE(!folio_test_locked(folio));
	WARN_ON_ONCE(folio_test_writeback(folio));
	WARN_ON_ONCE(folio_test_dirty(folio));

	/*
	 * We cannot cancel the ioend directly here on error.  We may have
	 * already set other pages under writeback and hence we have to run I/O
	 * completion to mark the error state of the pages under writeback
	 * appropriately.
	 */
	if (unlikely(error)) {
		/*
		 * Let the filesystem know what portion of the current page
		 * failed to map.  If the page hasn't been added to ioend, it
		 * won't be affected by I/O completion and we must unlock it
		 * now.
		 */
		if (wpc->ops->discard_folio)
			wpc->ops->discard_folio(folio, pos);
		if (!count) {
			folio_clear_uptodate(folio);
			folio_unlock(folio);
			goto done;
		}
	}

	folio_start_writeback(folio);
	folio_unlock(folio);

	/*
	 * Preserve the original error if there was one; catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = iomap_submit_ioend(wpc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we
	 * race with a partial page truncate on a sub-page block sized
	 * filesystem.
	 */
	if (!count)
		folio_end_writeback(folio);
done:
	mapping_set_error(folio->mapping, error);
	return error;
}
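
/*
 * Illustrative sketch of the ->map_blocks contract used above: the
 * callback must fill wpc->iomap with a mapping that covers @offset.  The
 * simplest possible (hypothetical) implementation, for a file stored
 * contiguously from disk address 0, might look like this; real
 * implementations look up and validate extents, cf. xfs_map_blocks().
 * Not built.
 */
#if 0
static int example_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset)
{
	wpc->iomap.type = IOMAP_MAPPED;
	wpc->iomap.offset = 0;
	wpc->iomap.length = i_size_read(inode);
	wpc->iomap.addr = 0;
	wpc->iomap.bdev = inode->i_sb->s_bdev;
	return 0;
}

static const struct iomap_writeback_ops example_writeback_ops = {
	.map_blocks	= example_map_blocks,
};
#endif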

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page, we need to allocate space and flush it.
 * For unwritten space on the page, we need to start the conversion to
 * regular allocated space.
 */
static int
iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
{
	struct folio *folio = page_folio(page);
	struct iomap_writepage_ctx *wpc = data;
	struct inode *inode = folio->mapping->host;
	u64 end_pos, isize;

	trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));

	/*
	 * Refuse to write the folio out if we're called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Is this folio beyond the end of the file?
	 *
	 * The folio index is less than the end_index, adjust the end_pos
	 * to the highest offset that this folio should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	isize = i_size_read(inode);
	end_pos = folio_pos(folio) + folio_size(folio);
	if (end_pos > isize) {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		size_t poff = offset_in_folio(folio, isize);
		pgoff_t end_index = isize >> PAGE_SHIFT;

		/*
		 * Skip the page if it's fully outside i_size, e.g. due to a
		 * truncate operation that's in progress.  We must redirty the
		 * page so that reclaim stops reclaiming it.  Otherwise
		 * iomap_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long.  If the given
		 * offset is greater than 16TB on a 32-bit system then if we
		 * checked if the page is fully outside i_size with
		 * "if (page->index >= end_index + 1)", "end_index + 1" would
		 * overflow and evaluate to 0.  Hence this page would be
		 * redirtied and written out repeatedly, which would result in
		 * an infinite loop; the user program performing this operation
		 * would hang.  Instead, we can detect this situation by
		 * checking if the page is totally beyond i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (folio->index > end_index ||
		    (folio->index == end_index && poff == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
1527 */ 1528 folio_zero_segment(folio, poff, folio_size(folio)); 1529 end_pos = isize; 1530 } 1531 1532 return iomap_writepage_map(wpc, wbc, inode, folio, end_pos); 1533 1534 redirty: 1535 folio_redirty_for_writepage(wbc, folio); 1536 folio_unlock(folio); 1537 return 0; 1538 } 1539 1540 int 1541 iomap_writepage(struct page *page, struct writeback_control *wbc, 1542 struct iomap_writepage_ctx *wpc, 1543 const struct iomap_writeback_ops *ops) 1544 { 1545 int ret; 1546 1547 wpc->ops = ops; 1548 ret = iomap_do_writepage(page, wbc, wpc); 1549 if (!wpc->ioend) 1550 return ret; 1551 return iomap_submit_ioend(wpc, wpc->ioend, ret); 1552 } 1553 EXPORT_SYMBOL_GPL(iomap_writepage); 1554 1555 int 1556 iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, 1557 struct iomap_writepage_ctx *wpc, 1558 const struct iomap_writeback_ops *ops) 1559 { 1560 int ret; 1561 1562 wpc->ops = ops; 1563 ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc); 1564 if (!wpc->ioend) 1565 return ret; 1566 return iomap_submit_ioend(wpc, wpc->ioend, ret); 1567 } 1568 EXPORT_SYMBOL_GPL(iomap_writepages); 1569 1570 static int __init iomap_init(void) 1571 { 1572 return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE), 1573 offsetof(struct iomap_ioend, io_inline_bio), 1574 BIOSET_NEED_BVECS); 1575 } 1576 fs_initcall(iomap_init); 1577