// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

/*
 * Structure allocated for each folio when block size < folio size
 * to track sub-folio uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];
};

static inline struct iomap_page *to_iomap_page(struct folio *folio)
{
	if (folio_test_private(folio))
		return folio_get_private(folio);
	return NULL;
}

static struct bio_set iomap_ioend_bioset;

static struct iomap_page *
iomap_page_create(struct inode *inode, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (iop || nr_blocks <= 1)
		return iop;

	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
			GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&iop->uptodate_lock);
	if (folio_test_uptodate(folio))
		bitmap_fill(iop->uptodate, nr_blocks);
	folio_attach_private(folio, iop);
	return iop;
}

static void iomap_page_release(struct folio *folio)
{
	struct iomap_page *iop = folio_detach_private(folio);
	struct inode *inode = folio->mapping->host;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			folio_test_uptodate(folio));
	kfree(iop);
}

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_page *iop = to_iomap_page(folio);
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check
	 * the per-block uptodate status and adjust the offset and length if
	 * needed to avoid reading in already uptodate ranges.
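	 *
	 * For example (illustrative only): with 1k blocks in a 4k folio where
	 * only the first block is already uptodate, a read of the whole folio
	 * is trimmed here to start at the second block.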
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

static void iomap_iop_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned long flags;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, last - first + 1);
	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void iomap_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	if (folio_test_error(folio))
		return;

	if (iop)
		iomap_iop_set_range_uptodate(folio, iop, off, len);
	else
		folio_mark_uptodate(folio);
}

static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (unlikely(error)) {
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		iomap_set_range_uptodate(folio, iop, offset, len);
	}

	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
		folio_unlock(folio);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
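 *
 * No block I/O is issued here: the data is copied straight from
 * iomap->inline_data, the filesystem's in-memory copy of the inline extent.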
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	struct iomap_page *iop;
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	size_t offset = offset_in_folio(folio, iomap->offset);
	void *addr;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			 offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		iop = iomap_page_create(iter->inode, folio);
	else
		iop = to_iomap_page(folio);

	addr = kmap_local_folio(folio, offset);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
	return 0;
}

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct folio *folio = ctx->cur_folio;
	struct iomap_page *iop;
	loff_t orig_pos = pos;
	size_t poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, folio);

	/* zero post-eof blocks as the page may be mapped */
	iop = iomap_page_create(iter->inode, folio);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, iop, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (iop)
		atomic_add(plen, &iop->read_bytes_pending);

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_readpage does.
		 */
		if (!ctx->bio)
			ctx->bio = bio_alloc(orig_gfp, 1);
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct folio *folio = page_folio(page);
	struct iomap_iter iter = {
		.inode	= folio->mapping->host,
		.pos	= folio_pos(folio),
		.len	= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		folio_set_error(folio);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_page, we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);

static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
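 *
 * If the folio has no per-block uptodate state attached, this conservatively
 * returns false.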
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct folio *folio = page_folio(page);

	trace_iomap_releasepage(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return 0;
	iomap_page_release(folio);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidatepage(folio->mapping->host, offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		iomap_page_release(folio);
	} else if (folio_test_large(folio)) {
		/* Must release the iop so the page can be split */
		WARN_ON_ONCE(!folio_test_uptodate(folio) &&
			     folio_test_dirty(folio));
		iomap_page_release(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

void iomap_invalidatepage(struct page *page, unsigned int offset,
		unsigned int len)
{
	iomap_invalidate_folio(page_folio(page), offset, len);
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	struct folio *folio = page_folio(page);
	struct folio *newfolio = page_folio(newpage);
	int ret;

	ret = folio_migrate_mapping(mapping, newfolio, folio, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_test_private(folio))
		folio_attach_private(newfolio, folio_detach_private(folio));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(newfolio, folio);
	else
		folio_migrate_flags(newfolio, folio);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
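	 *
	 * This way a failed or short write never discards page cache that was
	 * valid before the write started.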
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	bio_add_folio(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}

static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_page *iop = iomap_page_create(iter->inode, folio);
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	if (folio_test_uptodate(folio))
		return 0;
	folio_clear_error(folio);

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, iop, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(iter->inode, pos, len);
		if (status)
			return status;
	}

	folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
	if (!folio) {
		status = -ENOMEM;
		goto out_no_page;
	}
	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len,
				NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	folio_unlock(folio);
	folio_put(folio);
	iomap_write_failed(iter->inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, 0, NULL);
	return status;
}

static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);

	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return 0;
	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
	filemap_dirty_folio(inode->i_mapping, folio);
	return copied;
}

static size_t iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, folio, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, &folio->page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
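	 *
	 * Setting IOMAP_F_SIZE_CHANGED below lets the filesystem see that the
	 * size was updated during this iteration.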
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	folio_unlock(folio);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, ret, &folio->page);
	folio_put(folio);

	if (ret < len)
		iomap_write_failed(iter->inode, pos, len);
	return ret;
}

static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;

	do {
		struct folio *folio;
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			break;

		page = folio_file_page(folio, pos >> PAGE_SHIFT);
		if (mapping_writably_mapped(iter->inode->i_mapping))
			flush_dcache_page(page);

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);

		status = iomap_write_end(iter, pos, bytes, copied, folio);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			goto again;
		}
		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= iocb->ki_filp->f_mapping->host,
		.pos	= iocb->ki_pos,
		.len	= iov_iter_count(i),
		.flags	= IOMAP_WRITE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);
	if (iter.pos == iocb->ki_pos)
		return ret;
	return iter.pos - iocb->ki_pos;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	long status = 0;
	loff_t written = 0;

	/* don't bother with blocks that are not shared to start with */
	if (!(iomap->flags & IOMAP_F_SHARED))
		return length;
	/* don't bother with holes or unwritten extents */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned long offset = offset_in_page(pos);
		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
		struct folio *folio;

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			return status;

		status = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(status == 0))
			return -EIO;

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= inode,
		.pos	= pos,
		.len	= len,
		.flags	= IOMAP_WRITE | IOMAP_UNSHARE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);

static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	/* already zeroed?  we're done. */
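	/* (holes and unwritten extents already read back as zeroes) */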
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		struct folio *folio;
		int status;
		size_t offset;
		size_t bytes = min_t(u64, SIZE_MAX, length);

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (status)
			return status;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		folio_zero_range(folio, offset, bytes);
		folio_mark_accessed(folio);

		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(bytes == 0))
			return -EIO;

		pos += bytes;
		length -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (length > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= inode,
		.pos	= pos,
		.len	= len,
		.flags	= IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
		struct folio *folio)
{
	loff_t length = iomap_length(iter);
	int ret;

	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
					      &iter->iomap);
		if (ret)
			return ret;
		block_commit_write(&folio->page, 0, length);
	} else {
		WARN_ON_ONCE(!folio_test_uptodate(folio));
		folio_mark_dirty(folio);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= file_inode(vmf->vma->vm_file),
		.flags	= IOMAP_WRITE | IOMAP_FAULT,
	};
	struct folio *folio = page_folio(vmf->page);
	ssize_t ret;

	folio_lock(folio);
	ret = folio_mkwrite_check_truncate(folio, iter.inode);
	if (ret < 0)
		goto out_unlock;
	iter.pos = folio_pos(folio);
	iter.len = ret;
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_folio_mkwrite_iter(&iter, folio);

	if (ret < 0)
		goto out_unlock;
	folio_wait_stable(folio);
	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (error) {
		folio_set_error(folio);
		mapping_set_error(inode->i_mapping, error);
	}

	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);

	if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
		folio_end_writeback(folio);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static void
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_inline_bio;
	struct bio *last = ioend->io_bio, *next;
	u64 start = bio->bi_iter.bi_sector;
	loff_t offset = ioend->io_offset;
	bool quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct folio_iter fi;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk all folios in bio, ending page IO on them */
		bio_for_each_folio_all(fi, bio)
			iomap_finish_folio_write(inode, fi.folio, fi.length,
					error);
		bio_put(bio);
	}
	/* The ioend has been freed by bio_put() */

	if (unlikely(error && !quiet)) {
		printk_ratelimited(KERN_ERR
			"%s: writeback error on inode %lu, offset %lld, sector %llu",
			inode->i_sb->s_id, inode->i_ino, offset, start);
	}
}

void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
	struct list_head tmp;

	list_replace_init(&ioend->io_list, &tmp);
	iomap_finish_ioend(ioend, error);

	while (!list_empty(&tmp)) {
		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
		list_del_init(&ioend->io_list);
		iomap_finish_ioend(ioend, error);
	}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);

/*
 * We can merge two adjacent ioends if they have the same set of work to do.
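 * That means: the same bio status so far, matching shared/unwritten state,
 * and file offsets that line up back to back.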
 */
static bool
iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
{
	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
		return false;
	if ((ioend->io_flags & IOMAP_F_SHARED) ^
	    (next->io_flags & IOMAP_F_SHARED))
		return false;
	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
	    (next->io_type == IOMAP_UNWRITTEN))
		return false;
	if (ioend->io_offset + ioend->io_size != next->io_offset)
		return false;
	return true;
}

void
iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
{
	struct iomap_ioend *next;

	INIT_LIST_HEAD(&ioend->io_list);

	while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
			io_list))) {
		if (!iomap_ioend_can_merge(ioend, next))
			break;
		list_move_tail(&next->io_list, &ioend->io_list);
		ioend->io_size += next->io_size;
	}
}
EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);

static int
iomap_ioend_compare(void *priv, const struct list_head *a,
		const struct list_head *b)
{
	struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
	struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);

	if (ia->io_offset < ib->io_offset)
		return -1;
	if (ia->io_offset > ib->io_offset)
		return 1;
	return 0;
}

void
iomap_sort_ioends(struct list_head *ioend_list)
{
	list_sort(NULL, ioend_list, iomap_ioend_compare);
}
EXPORT_SYMBOL_GPL(iomap_sort_ioends);

static void iomap_writepage_end_bio(struct bio *bio)
{
	struct iomap_ioend *ioend = bio->bi_private;

	iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

/*
 * Submit the final bio for an ioend.
 *
 * If @error is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we've marked pages for writeback
 * and unlocked them.  In this situation, we need to fail the bio instead of
 * submitting it.  This typically only happens on a filesystem shutdown.
 */
static int
iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
		int error)
{
	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = iomap_writepage_end_bio;

	if (wpc->ops->prepare_ioend)
		error = wpc->ops->prepare_ioend(ioend, error);
	if (error) {
		/*
		 * If we're failing the IO now, just mark the ioend with an
		 * error and finish it.  This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
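		 *
		 * bio_endio() below ends up in iomap_writepage_end_bio(),
		 * which calls iomap_finish_ioend() with the error set.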
		 */
		ioend->io_bio->bi_status = errno_to_blk_status(error);
		bio_endio(ioend->io_bio);
		return error;
	}

	submit_bio(ioend->io_bio);
	return 0;
}

static struct iomap_ioend *
iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
		loff_t offset, sector_t sector, struct writeback_control *wbc)
{
	struct iomap_ioend *ioend;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset);
	bio_set_dev(bio, wpc->iomap.bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	bio->bi_write_hint = inode->i_write_hint;
	wbc_init_bio(wbc, bio);

	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = wpc->iomap.type;
	ioend->io_flags = wpc->iomap.flags;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in iomap_finish_ioend().
 */
static struct bio *
iomap_chain_bio(struct bio *prev)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
	bio_copy_dev(new, prev);	/* also copies over blkcg information */
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;

	bio_chain(prev, new);
	bio_get(prev);		/* for iomap_finish_ioend */
	submit_bio(prev);
	return new;
}

static bool
iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
		sector_t sector)
{
	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
		return false;
	if (wpc->iomap.type != wpc->ioend->io_type)
		return false;
	if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
		return false;
	if (sector != bio_end_sector(wpc->ioend->io_bio))
		return false;
	return true;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
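 * The ioend being built is cached in wpc->ioend; an ioend that can no longer
 * be extended is queued on the caller's iolist for submission later.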
 */
static void
iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
		struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct list_head *iolist)
{
	sector_t sector = iomap_sector(&wpc->iomap, pos);
	unsigned len = i_blocksize(inode);
	size_t poff = offset_in_folio(folio, pos);

	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
	}

	if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
		wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
		bio_add_folio(wpc->ioend->io_bio, folio, len, poff);
	}

	if (iop)
		atomic_add(len, &iop->write_bytes_pending);
	wpc->ioend->io_size += len;
	wbc_account_cgroup_owner(wbc, &folio->page, len);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * the forward progress guarantees we need to provide. The current ioend we're
 * adding blocks to is cached in the writepage context, and if the new block
 * doesn't append to the cached ioend, it will create a new ioend and cache
 * that instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has
 * been detected.  While ioends are submitted immediately after they are
 * completed, batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on
 * the writepage context that the caller will need to submit.
 */
static int
iomap_writepage_map(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct inode *inode,
		struct folio *folio, u64 end_pos)
{
	struct iomap_page *iop = iomap_page_create(inode, folio);
	struct iomap_ioend *ioend, *next;
	unsigned len = i_blocksize(inode);
	unsigned nblocks = i_blocks_per_folio(inode, folio);
	u64 pos = folio_pos(folio);
	int error = 0, count = 0, i;
	LIST_HEAD(submit_list);

	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);

	/*
	 * Walk through the folio to find areas to write back. If we
	 * run off the end of the current map or find the current map
	 * invalid, grab a new one.
	 */
	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = wpc->ops->map_blocks(wpc, inode, pos);
		if (error)
			break;
		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
			continue;
		if (wpc->iomap.type == IOMAP_HOLE)
			continue;
		iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
				&submit_list);
		count++;
	}

	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
	WARN_ON_ONCE(!folio_test_locked(folio));
	WARN_ON_ONCE(folio_test_writeback(folio));
	WARN_ON_ONCE(folio_test_dirty(folio));

	/*
	 * We cannot cancel the ioend directly here on error.  We may have
	 * already set other pages under writeback and hence we have to run I/O
	 * completion to mark the error state of the pages under writeback
	 * appropriately.
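	 *
	 * The error is still propagated: any ioends already queued on
	 * submit_list are submitted below with the error attached.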
	 */
	if (unlikely(error)) {
		/*
		 * Let the filesystem know what portion of the current page
		 * failed to map. If the page hasn't been added to ioend, it
		 * won't be affected by I/O completion and we must unlock it
		 * now.
		 */
		if (wpc->ops->discard_folio)
			wpc->ops->discard_folio(folio, pos);
		if (!count) {
			folio_clear_uptodate(folio);
			folio_unlock(folio);
			goto done;
		}
	}

	folio_start_writeback(folio);
	folio_unlock(folio);

	/*
	 * Preserve the original error if there was one; catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = iomap_submit_ioend(wpc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we
	 * race with a partial page truncate on a sub-page block sized
	 * filesystem.
	 */
	if (!count)
		folio_end_writeback(folio);
done:
	mapping_set_error(folio->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page, we need to allocate space and flush it.
 * For unwritten space on the page, we need to start the conversion to
 * regular allocated space.
 */
static int
iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
{
	struct folio *folio = page_folio(page);
	struct iomap_writepage_ctx *wpc = data;
	struct inode *inode = folio->mapping->host;
	u64 end_pos, isize;

	trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));

	/*
	 * Refuse to write the folio out if we're called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Is this folio beyond the end of the file?
	 *
	 * The folio index is less than the end_index, adjust the end_pos
	 * to the highest offset that this folio should represent.
	 * -----------------------------------------------------
	 * |            file mapping               |    <EOF>   |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |        |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else     |
	 * ---------------------------------^------------------|
	 */
	isize = i_size_read(inode);
	end_pos = folio_pos(folio) + folio_size(folio);
	if (end_pos > isize) {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |            file mapping                    | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |                                | Straddles |
		 * ---------------------------------^-----------|--------|
		 */
		size_t poff = offset_in_folio(folio, isize);
		pgoff_t end_index = isize >> PAGE_SHIFT;

		/*
		 * Skip the page if it's fully outside i_size, e.g. due to a
		 * truncate operation that's in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * iomap_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long.  If the given
		 * offset is greater than 16TB on a 32-bit system then if we
		 * checked if the page is fully outside i_size with
		 * "if (page->index >= end_index + 1)", "end_index + 1" would
		 * overflow and evaluate to 0.  Hence this page would be
		 * redirtied and written out repeatedly, which would result in
		 * an infinite loop; the user program performing this operation
		 * would hang.  Instead, we can detect this situation by
		 * checking if the page is totally beyond i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (folio->index > end_index ||
		    (folio->index == end_index && poff == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		folio_zero_segment(folio, poff, folio_size(folio));
		end_pos = isize;
	}

	return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);

redirty:
	folio_redirty_for_writepage(wbc, folio);
	folio_unlock(folio);
	return 0;
}

int
iomap_writepage(struct page *page, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = iomap_do_writepage(page, wbc, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepage);

int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepages);

static int __init iomap_init(void)
{
	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
			   offsetof(struct iomap_ioend, io_inline_bio),
			   BIOSET_NEED_BVECS);
}
fs_initcall(iomap_init);