1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2010 Red Hat, Inc. 4 * Copyright (C) 2016-2019 Christoph Hellwig. 5 */ 6 #include <linux/module.h> 7 #include <linux/compiler.h> 8 #include <linux/fs.h> 9 #include <linux/iomap.h> 10 #include <linux/pagemap.h> 11 #include <linux/uio.h> 12 #include <linux/buffer_head.h> 13 #include <linux/dax.h> 14 #include <linux/writeback.h> 15 #include <linux/list_sort.h> 16 #include <linux/swap.h> 17 #include <linux/bio.h> 18 #include <linux/sched/signal.h> 19 #include <linux/migrate.h> 20 #include "trace.h" 21 22 #include "../internal.h" 23 24 #define IOEND_BATCH_SIZE 4096 25 26 /* 27 * Structure allocated for each folio when block size < folio size 28 * to track sub-folio uptodate status and I/O completions. 29 */ 30 struct iomap_page { 31 atomic_t read_bytes_pending; 32 atomic_t write_bytes_pending; 33 spinlock_t uptodate_lock; 34 unsigned long uptodate[]; 35 }; 36 37 static inline struct iomap_page *to_iomap_page(struct folio *folio) 38 { 39 if (folio_test_private(folio)) 40 return folio_get_private(folio); 41 return NULL; 42 } 43 44 static struct bio_set iomap_ioend_bioset; 45 46 static struct iomap_page * 47 iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags) 48 { 49 struct iomap_page *iop = to_iomap_page(folio); 50 unsigned int nr_blocks = i_blocks_per_folio(inode, folio); 51 gfp_t gfp; 52 53 if (iop || nr_blocks <= 1) 54 return iop; 55 56 if (flags & IOMAP_NOWAIT) 57 gfp = GFP_NOWAIT; 58 else 59 gfp = GFP_NOFS | __GFP_NOFAIL; 60 61 iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)), 62 gfp); 63 if (iop) { 64 spin_lock_init(&iop->uptodate_lock); 65 if (folio_test_uptodate(folio)) 66 bitmap_fill(iop->uptodate, nr_blocks); 67 folio_attach_private(folio, iop); 68 } 69 return iop; 70 } 71 72 static void iomap_page_release(struct folio *folio) 73 { 74 struct iomap_page *iop = folio_detach_private(folio); 75 struct inode *inode = folio->mapping->host; 76 unsigned int nr_blocks = i_blocks_per_folio(inode, folio); 77 78 if (!iop) 79 return; 80 WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending)); 81 WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending)); 82 WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) != 83 folio_test_uptodate(folio)); 84 kfree(iop); 85 } 86 87 /* 88 * Calculate the range inside the folio that we actually need to read. 89 */ 90 static void iomap_adjust_read_range(struct inode *inode, struct folio *folio, 91 loff_t *pos, loff_t length, size_t *offp, size_t *lenp) 92 { 93 struct iomap_page *iop = to_iomap_page(folio); 94 loff_t orig_pos = *pos; 95 loff_t isize = i_size_read(inode); 96 unsigned block_bits = inode->i_blkbits; 97 unsigned block_size = (1 << block_bits); 98 size_t poff = offset_in_folio(folio, *pos); 99 size_t plen = min_t(loff_t, folio_size(folio) - poff, length); 100 unsigned first = poff >> block_bits; 101 unsigned last = (poff + plen - 1) >> block_bits; 102 103 /* 104 * If the block size is smaller than the page size, we need to check the 105 * per-block uptodate status and adjust the offset and length if needed 106 * to avoid reading in already uptodate ranges. 
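 *
 * Illustrative example (block and folio sizes are assumptions, not anything
 * the code requires): with 1k blocks in a 4k folio, a read of the whole
 * folio where blocks 0 and 1 are already uptodate but 2 and 3 are not makes
 * the first loop below advance *pos and poff past the first 2k and shrink
 * plen to 2k; the second loop finds no trailing uptodate blocks, so only
 * bytes 2048-4095 of the folio are actually read.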
107 */ 108 if (iop) { 109 unsigned int i; 110 111 /* move forward for each leading block marked uptodate */ 112 for (i = first; i <= last; i++) { 113 if (!test_bit(i, iop->uptodate)) 114 break; 115 *pos += block_size; 116 poff += block_size; 117 plen -= block_size; 118 first++; 119 } 120 121 /* truncate len if we find any trailing uptodate block(s) */ 122 for ( ; i <= last; i++) { 123 if (test_bit(i, iop->uptodate)) { 124 plen -= (last - i + 1) * block_size; 125 last = i - 1; 126 break; 127 } 128 } 129 } 130 131 /* 132 * If the extent spans the block that contains the i_size, we need to 133 * handle both halves separately so that we properly zero data in the 134 * page cache for blocks that are entirely outside of i_size. 135 */ 136 if (orig_pos <= isize && orig_pos + length > isize) { 137 unsigned end = offset_in_folio(folio, isize - 1) >> block_bits; 138 139 if (first <= end && last > end) 140 plen -= (last - end) * block_size; 141 } 142 143 *offp = poff; 144 *lenp = plen; 145 } 146 147 static void iomap_iop_set_range_uptodate(struct folio *folio, 148 struct iomap_page *iop, size_t off, size_t len) 149 { 150 struct inode *inode = folio->mapping->host; 151 unsigned first = off >> inode->i_blkbits; 152 unsigned last = (off + len - 1) >> inode->i_blkbits; 153 unsigned long flags; 154 155 spin_lock_irqsave(&iop->uptodate_lock, flags); 156 bitmap_set(iop->uptodate, first, last - first + 1); 157 if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio))) 158 folio_mark_uptodate(folio); 159 spin_unlock_irqrestore(&iop->uptodate_lock, flags); 160 } 161 162 static void iomap_set_range_uptodate(struct folio *folio, 163 struct iomap_page *iop, size_t off, size_t len) 164 { 165 if (iop) 166 iomap_iop_set_range_uptodate(folio, iop, off, len); 167 else 168 folio_mark_uptodate(folio); 169 } 170 171 static void iomap_finish_folio_read(struct folio *folio, size_t offset, 172 size_t len, int error) 173 { 174 struct iomap_page *iop = to_iomap_page(folio); 175 176 if (unlikely(error)) { 177 folio_clear_uptodate(folio); 178 folio_set_error(folio); 179 } else { 180 iomap_set_range_uptodate(folio, iop, offset, len); 181 } 182 183 if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending)) 184 folio_unlock(folio); 185 } 186 187 static void iomap_read_end_io(struct bio *bio) 188 { 189 int error = blk_status_to_errno(bio->bi_status); 190 struct folio_iter fi; 191 192 bio_for_each_folio_all(fi, bio) 193 iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error); 194 bio_put(bio); 195 } 196 197 struct iomap_readpage_ctx { 198 struct folio *cur_folio; 199 bool cur_folio_in_bio; 200 struct bio *bio; 201 struct readahead_control *rac; 202 }; 203 204 /** 205 * iomap_read_inline_data - copy inline data into the page cache 206 * @iter: iteration structure 207 * @folio: folio to copy to 208 * 209 * Copy the inline data in @iter into @folio and zero out the rest of the folio. 210 * Only a single IOMAP_INLINE extent is allowed at the end of each file. 211 * Returns zero for success to complete the read, or the usual negative errno. 
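 *
 * For example (sizes are illustrative only): a 300-byte file stored as
 * inline data maps to a single IOMAP_INLINE extent at offset 0; the 300
 * bytes are copied into the folio, the remainder of that page is zeroed,
 * and the whole range is then marked uptodate.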
212 */ 213 static int iomap_read_inline_data(const struct iomap_iter *iter, 214 struct folio *folio) 215 { 216 struct iomap_page *iop; 217 const struct iomap *iomap = iomap_iter_srcmap(iter); 218 size_t size = i_size_read(iter->inode) - iomap->offset; 219 size_t poff = offset_in_page(iomap->offset); 220 size_t offset = offset_in_folio(folio, iomap->offset); 221 void *addr; 222 223 if (folio_test_uptodate(folio)) 224 return 0; 225 226 if (WARN_ON_ONCE(size > PAGE_SIZE - poff)) 227 return -EIO; 228 if (WARN_ON_ONCE(size > PAGE_SIZE - 229 offset_in_page(iomap->inline_data))) 230 return -EIO; 231 if (WARN_ON_ONCE(size > iomap->length)) 232 return -EIO; 233 if (offset > 0) 234 iop = iomap_page_create(iter->inode, folio, iter->flags); 235 else 236 iop = to_iomap_page(folio); 237 238 addr = kmap_local_folio(folio, offset); 239 memcpy(addr, iomap->inline_data, size); 240 memset(addr + size, 0, PAGE_SIZE - poff - size); 241 kunmap_local(addr); 242 iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff); 243 return 0; 244 } 245 246 static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter, 247 loff_t pos) 248 { 249 const struct iomap *srcmap = iomap_iter_srcmap(iter); 250 251 return srcmap->type != IOMAP_MAPPED || 252 (srcmap->flags & IOMAP_F_NEW) || 253 pos >= i_size_read(iter->inode); 254 } 255 256 static loff_t iomap_readpage_iter(const struct iomap_iter *iter, 257 struct iomap_readpage_ctx *ctx, loff_t offset) 258 { 259 const struct iomap *iomap = &iter->iomap; 260 loff_t pos = iter->pos + offset; 261 loff_t length = iomap_length(iter) - offset; 262 struct folio *folio = ctx->cur_folio; 263 struct iomap_page *iop; 264 loff_t orig_pos = pos; 265 size_t poff, plen; 266 sector_t sector; 267 268 if (iomap->type == IOMAP_INLINE) 269 return iomap_read_inline_data(iter, folio); 270 271 /* zero post-eof blocks as the page may be mapped */ 272 iop = iomap_page_create(iter->inode, folio, iter->flags); 273 iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen); 274 if (plen == 0) 275 goto done; 276 277 if (iomap_block_needs_zeroing(iter, pos)) { 278 folio_zero_range(folio, poff, plen); 279 iomap_set_range_uptodate(folio, iop, poff, plen); 280 goto done; 281 } 282 283 ctx->cur_folio_in_bio = true; 284 if (iop) 285 atomic_add(plen, &iop->read_bytes_pending); 286 287 sector = iomap_sector(iomap, pos); 288 if (!ctx->bio || 289 bio_end_sector(ctx->bio) != sector || 290 !bio_add_folio(ctx->bio, folio, plen, poff)) { 291 gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL); 292 gfp_t orig_gfp = gfp; 293 unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE); 294 295 if (ctx->bio) 296 submit_bio(ctx->bio); 297 298 if (ctx->rac) /* same as readahead_gfp_mask */ 299 gfp |= __GFP_NORETRY | __GFP_NOWARN; 300 ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), 301 REQ_OP_READ, gfp); 302 /* 303 * If the bio_alloc fails, try it again for a single page to 304 * avoid having to deal with partial page reads. This emulates 305 * what do_mpage_read_folio does. 306 */ 307 if (!ctx->bio) { 308 ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, 309 orig_gfp); 310 } 311 if (ctx->rac) 312 ctx->bio->bi_opf |= REQ_RAHEAD; 313 ctx->bio->bi_iter.bi_sector = sector; 314 ctx->bio->bi_end_io = iomap_read_end_io; 315 bio_add_folio(ctx->bio, folio, plen, poff); 316 } 317 318 done: 319 /* 320 * Move the caller beyond our range so that it keeps making progress. 
321 * For that, we have to include any leading non-uptodate ranges, but 322 * we can skip trailing ones as they will be handled in the next 323 * iteration. 324 */ 325 return pos - orig_pos + plen; 326 } 327 328 int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops) 329 { 330 struct iomap_iter iter = { 331 .inode = folio->mapping->host, 332 .pos = folio_pos(folio), 333 .len = folio_size(folio), 334 }; 335 struct iomap_readpage_ctx ctx = { 336 .cur_folio = folio, 337 }; 338 int ret; 339 340 trace_iomap_readpage(iter.inode, 1); 341 342 while ((ret = iomap_iter(&iter, ops)) > 0) 343 iter.processed = iomap_readpage_iter(&iter, &ctx, 0); 344 345 if (ret < 0) 346 folio_set_error(folio); 347 348 if (ctx.bio) { 349 submit_bio(ctx.bio); 350 WARN_ON_ONCE(!ctx.cur_folio_in_bio); 351 } else { 352 WARN_ON_ONCE(ctx.cur_folio_in_bio); 353 folio_unlock(folio); 354 } 355 356 /* 357 * Just like mpage_readahead and block_read_full_folio, we always 358 * return 0 and just set the folio error flag on errors. This 359 * should be cleaned up throughout the stack eventually. 360 */ 361 return 0; 362 } 363 EXPORT_SYMBOL_GPL(iomap_read_folio); 364 365 static loff_t iomap_readahead_iter(const struct iomap_iter *iter, 366 struct iomap_readpage_ctx *ctx) 367 { 368 loff_t length = iomap_length(iter); 369 loff_t done, ret; 370 371 for (done = 0; done < length; done += ret) { 372 if (ctx->cur_folio && 373 offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) { 374 if (!ctx->cur_folio_in_bio) 375 folio_unlock(ctx->cur_folio); 376 ctx->cur_folio = NULL; 377 } 378 if (!ctx->cur_folio) { 379 ctx->cur_folio = readahead_folio(ctx->rac); 380 ctx->cur_folio_in_bio = false; 381 } 382 ret = iomap_readpage_iter(iter, ctx, done); 383 if (ret <= 0) 384 return ret; 385 } 386 387 return done; 388 } 389 390 /** 391 * iomap_readahead - Attempt to read pages from a file. 392 * @rac: Describes the pages to be read. 393 * @ops: The operations vector for the filesystem. 394 * 395 * This function is for filesystems to call to implement their readahead 396 * address_space operation. 397 * 398 * Context: The @ops callbacks may submit I/O (eg to read the addresses of 399 * blocks from disc), and may wait for it. The caller may be trying to 400 * access a different page, and so sleeping excessively should be avoided. 401 * It may allocate memory, but should avoid costly allocations. This 402 * function is called with memalloc_nofs set, so allocations will not cause 403 * the filesystem to be reentered. 404 */ 405 void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops) 406 { 407 struct iomap_iter iter = { 408 .inode = rac->mapping->host, 409 .pos = readahead_pos(rac), 410 .len = readahead_length(rac), 411 }; 412 struct iomap_readpage_ctx ctx = { 413 .rac = rac, 414 }; 415 416 trace_iomap_readahead(rac->mapping->host, readahead_count(rac)); 417 418 while (iomap_iter(&iter, ops) > 0) 419 iter.processed = iomap_readahead_iter(&iter, &ctx); 420 421 if (ctx.bio) 422 submit_bio(ctx.bio); 423 if (ctx.cur_folio) { 424 if (!ctx.cur_folio_in_bio) 425 folio_unlock(ctx.cur_folio); 426 } 427 } 428 EXPORT_SYMBOL_GPL(iomap_readahead); 429 430 /* 431 * iomap_is_partially_uptodate checks whether blocks within a folio are 432 * uptodate or not. 433 * 434 * Returns true if all blocks which correspond to the specified part 435 * of the folio are uptodate. 
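 *
 * For example (assuming 1k blocks in a 4k folio, purely illustrative): if
 * only blocks 0-2 are marked uptodate in the iomap_page bitmap, a query for
 * from=1024, count=2048 covers blocks 1-2 and returns true, while a query
 * for from=2048, count=2048 covers blocks 2-3 and returns false.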
436 */ 437 bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count) 438 { 439 struct iomap_page *iop = to_iomap_page(folio); 440 struct inode *inode = folio->mapping->host; 441 unsigned first, last, i; 442 443 if (!iop) 444 return false; 445 446 /* Caller's range may extend past the end of this folio */ 447 count = min(folio_size(folio) - from, count); 448 449 /* First and last blocks in range within folio */ 450 first = from >> inode->i_blkbits; 451 last = (from + count - 1) >> inode->i_blkbits; 452 453 for (i = first; i <= last; i++) 454 if (!test_bit(i, iop->uptodate)) 455 return false; 456 return true; 457 } 458 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate); 459 460 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags) 461 { 462 trace_iomap_release_folio(folio->mapping->host, folio_pos(folio), 463 folio_size(folio)); 464 465 /* 466 * mm accommodates an old ext3 case where clean folios might 467 * not have had the dirty bit cleared. Thus, it can send actual 468 * dirty folios to ->release_folio() via shrink_active_list(); 469 * skip those here. 470 */ 471 if (folio_test_dirty(folio) || folio_test_writeback(folio)) 472 return false; 473 iomap_page_release(folio); 474 return true; 475 } 476 EXPORT_SYMBOL_GPL(iomap_release_folio); 477 478 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len) 479 { 480 trace_iomap_invalidate_folio(folio->mapping->host, 481 folio_pos(folio) + offset, len); 482 483 /* 484 * If we're invalidating the entire folio, clear the dirty state 485 * from it and release it to avoid unnecessary buildup of the LRU. 486 */ 487 if (offset == 0 && len == folio_size(folio)) { 488 WARN_ON_ONCE(folio_test_writeback(folio)); 489 folio_cancel_dirty(folio); 490 iomap_page_release(folio); 491 } else if (folio_test_large(folio)) { 492 /* Must release the iop so the page can be split */ 493 WARN_ON_ONCE(!folio_test_uptodate(folio) && 494 folio_test_dirty(folio)); 495 iomap_page_release(folio); 496 } 497 } 498 EXPORT_SYMBOL_GPL(iomap_invalidate_folio); 499 500 static void 501 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len) 502 { 503 loff_t i_size = i_size_read(inode); 504 505 /* 506 * Only truncate newly allocated pages beyond EOF, even if the 507 * write started inside the existing inode size.
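 *
 * Worked example with illustrative numbers: i_size = 10000 and a failed
 * write of len = 8192 at pos = 8192 leaves pos + len = 16384 past EOF, so
 * we call truncate_pagecache_range(inode, 10000, 16383) and leave the cache
 * below EOF untouched; a failed write entirely below EOF truncates nothing.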
508 */ 509 if (pos + len > i_size) 510 truncate_pagecache_range(inode, max(pos, i_size), 511 pos + len - 1); 512 } 513 514 static int iomap_read_folio_sync(loff_t block_start, struct folio *folio, 515 size_t poff, size_t plen, const struct iomap *iomap) 516 { 517 struct bio_vec bvec; 518 struct bio bio; 519 520 bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ); 521 bio.bi_iter.bi_sector = iomap_sector(iomap, block_start); 522 bio_add_folio(&bio, folio, plen, poff); 523 return submit_bio_wait(&bio); 524 } 525 526 static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos, 527 size_t len, struct folio *folio) 528 { 529 const struct iomap *srcmap = iomap_iter_srcmap(iter); 530 struct iomap_page *iop; 531 loff_t block_size = i_blocksize(iter->inode); 532 loff_t block_start = round_down(pos, block_size); 533 loff_t block_end = round_up(pos + len, block_size); 534 unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio); 535 size_t from = offset_in_folio(folio, pos), to = from + len; 536 size_t poff, plen; 537 538 if (folio_test_uptodate(folio)) 539 return 0; 540 folio_clear_error(folio); 541 542 iop = iomap_page_create(iter->inode, folio, iter->flags); 543 if ((iter->flags & IOMAP_NOWAIT) && !iop && nr_blocks > 1) 544 return -EAGAIN; 545 546 do { 547 iomap_adjust_read_range(iter->inode, folio, &block_start, 548 block_end - block_start, &poff, &plen); 549 if (plen == 0) 550 break; 551 552 if (!(iter->flags & IOMAP_UNSHARE) && 553 (from <= poff || from >= poff + plen) && 554 (to <= poff || to >= poff + plen)) 555 continue; 556 557 if (iomap_block_needs_zeroing(iter, block_start)) { 558 if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE)) 559 return -EIO; 560 folio_zero_segments(folio, poff, from, to, poff + plen); 561 } else { 562 int status; 563 564 if (iter->flags & IOMAP_NOWAIT) 565 return -EAGAIN; 566 567 status = iomap_read_folio_sync(block_start, folio, 568 poff, plen, srcmap); 569 if (status) 570 return status; 571 } 572 iomap_set_range_uptodate(folio, iop, poff, plen); 573 } while ((block_start += plen) < block_end); 574 575 return 0; 576 } 577 578 static int iomap_write_begin_inline(const struct iomap_iter *iter, 579 struct folio *folio) 580 { 581 /* needs more work for the tailpacking case; disable for now */ 582 if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0)) 583 return -EIO; 584 return iomap_read_inline_data(iter, folio); 585 } 586 587 static int iomap_write_begin(struct iomap_iter *iter, loff_t pos, 588 size_t len, struct folio **foliop) 589 { 590 const struct iomap_page_ops *page_ops = iter->iomap.page_ops; 591 const struct iomap *srcmap = iomap_iter_srcmap(iter); 592 struct folio *folio; 593 unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS; 594 int status = 0; 595 596 if (iter->flags & IOMAP_NOWAIT) 597 fgp |= FGP_NOWAIT; 598 599 BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length); 600 if (srcmap != &iter->iomap) 601 BUG_ON(pos + len > srcmap->offset + srcmap->length); 602 603 if (fatal_signal_pending(current)) 604 return -EINTR; 605 606 if (!mapping_large_folio_support(iter->inode->i_mapping)) 607 len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos)); 608 609 if (page_ops && page_ops->page_prepare) { 610 status = page_ops->page_prepare(iter->inode, pos, len); 611 if (status) 612 return status; 613 } 614 615 folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT, 616 fgp, mapping_gfp_mask(iter->inode->i_mapping)); 617 if (!folio) { 618 status = (iter->flags & IOMAP_NOWAIT) ? 
-EAGAIN : -ENOMEM; 619 goto out_no_page; 620 } 621 622 /* 623 * Now we have a locked folio, before we do anything with it we need to 624 * check that the iomap we have cached is not stale. The inode extent 625 * mapping can change due to concurrent IO in flight (e.g. 626 * IOMAP_UNWRITTEN state can change and memory reclaim could have 627 * reclaimed a previously partially written page at this index after IO 628 * completion before this write reaches this file offset) and hence we 629 * could do the wrong thing here (zero a page range incorrectly or fail 630 * to zero) and corrupt data. 631 */ 632 if (page_ops && page_ops->iomap_valid) { 633 bool iomap_valid = page_ops->iomap_valid(iter->inode, 634 &iter->iomap); 635 if (!iomap_valid) { 636 iter->iomap.flags |= IOMAP_F_STALE; 637 status = 0; 638 goto out_unlock; 639 } 640 } 641 642 if (pos + len > folio_pos(folio) + folio_size(folio)) 643 len = folio_pos(folio) + folio_size(folio) - pos; 644 645 if (srcmap->type == IOMAP_INLINE) 646 status = iomap_write_begin_inline(iter, folio); 647 else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) 648 status = __block_write_begin_int(folio, pos, len, NULL, srcmap); 649 else 650 status = __iomap_write_begin(iter, pos, len, folio); 651 652 if (unlikely(status)) 653 goto out_unlock; 654 655 *foliop = folio; 656 return 0; 657 658 out_unlock: 659 folio_unlock(folio); 660 folio_put(folio); 661 iomap_write_failed(iter->inode, pos, len); 662 663 out_no_page: 664 if (page_ops && page_ops->page_done) 665 page_ops->page_done(iter->inode, pos, 0, NULL); 666 return status; 667 } 668 669 static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len, 670 size_t copied, struct folio *folio) 671 { 672 struct iomap_page *iop = to_iomap_page(folio); 673 flush_dcache_folio(folio); 674 675 /* 676 * The blocks that were entirely written will now be uptodate, so we 677 * don't have to worry about a read_folio reading them and overwriting a 678 * partial write. However, if we've encountered a short write and only 679 * partially written into a block, it will not be marked uptodate, so a 680 * read_folio might come in and destroy our partial write. 681 * 682 * Do the simplest thing and just treat any short write to a 683 * non-uptodate page as a zero-length write, and force the caller to 684 * redo the whole thing. 685 */ 686 if (unlikely(copied < len && !folio_test_uptodate(folio))) 687 return 0; 688 iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len); 689 filemap_dirty_folio(inode->i_mapping, folio); 690 return copied; 691 } 692 693 static size_t iomap_write_end_inline(const struct iomap_iter *iter, 694 struct folio *folio, loff_t pos, size_t copied) 695 { 696 const struct iomap *iomap = &iter->iomap; 697 void *addr; 698 699 WARN_ON_ONCE(!folio_test_uptodate(folio)); 700 BUG_ON(!iomap_inline_data_valid(iomap)); 701 702 flush_dcache_folio(folio); 703 addr = kmap_local_folio(folio, pos); 704 memcpy(iomap_inline_data(iomap, pos), addr, copied); 705 kunmap_local(addr); 706 707 mark_inode_dirty(iter->inode); 708 return copied; 709 } 710 711 /* Returns the number of bytes copied. May be 0. Cannot be an errno. 
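 * A zero return typically means a short copy landed in a folio that was not
 * yet uptodate, in which case __iomap_write_end() above rejects the whole
 * attempt so that a later read_folio cannot clobber the partial data. For
 * example (illustrative numbers): a request for 4096 bytes that copies only
 * 1000 into a non-uptodate folio returns 0, and iomap_write_iter() then
 * retries the copy with the shorter length.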
*/ 712 static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len, 713 size_t copied, struct folio *folio) 714 { 715 const struct iomap_page_ops *page_ops = iter->iomap.page_ops; 716 const struct iomap *srcmap = iomap_iter_srcmap(iter); 717 loff_t old_size = iter->inode->i_size; 718 size_t ret; 719 720 if (srcmap->type == IOMAP_INLINE) { 721 ret = iomap_write_end_inline(iter, folio, pos, copied); 722 } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) { 723 ret = block_write_end(NULL, iter->inode->i_mapping, pos, len, 724 copied, &folio->page, NULL); 725 } else { 726 ret = __iomap_write_end(iter->inode, pos, len, copied, folio); 727 } 728 729 /* 730 * Update the in-memory inode size after copying the data into the page 731 * cache. It's up to the file system to write the updated size to disk, 732 * preferably after I/O completion so that no stale data is exposed. 733 */ 734 if (pos + ret > old_size) { 735 i_size_write(iter->inode, pos + ret); 736 iter->iomap.flags |= IOMAP_F_SIZE_CHANGED; 737 } 738 folio_unlock(folio); 739 740 if (old_size < pos) 741 pagecache_isize_extended(iter->inode, old_size, pos); 742 if (page_ops && page_ops->page_done) 743 page_ops->page_done(iter->inode, pos, ret, &folio->page); 744 folio_put(folio); 745 746 if (ret < len) 747 iomap_write_failed(iter->inode, pos + ret, len - ret); 748 return ret; 749 } 750 751 static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i) 752 { 753 loff_t length = iomap_length(iter); 754 loff_t pos = iter->pos; 755 ssize_t written = 0; 756 long status = 0; 757 struct address_space *mapping = iter->inode->i_mapping; 758 unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0; 759 760 do { 761 struct folio *folio; 762 struct page *page; 763 unsigned long offset; /* Offset into pagecache page */ 764 unsigned long bytes; /* Bytes to write to page */ 765 size_t copied; /* Bytes copied from user */ 766 767 offset = offset_in_page(pos); 768 bytes = min_t(unsigned long, PAGE_SIZE - offset, 769 iov_iter_count(i)); 770 again: 771 status = balance_dirty_pages_ratelimited_flags(mapping, 772 bdp_flags); 773 if (unlikely(status)) 774 break; 775 776 if (bytes > length) 777 bytes = length; 778 779 /* 780 * Bring in the user page that we'll copy from _first_. 781 * Otherwise there's a nasty deadlock on copying from the 782 * same page as we're writing to, without it being marked 783 * up-to-date. 784 * 785 * For async buffered writes the assumption is that the user 786 * page has already been faulted in. This can be optimized by 787 * faulting the user page. 788 */ 789 if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) { 790 status = -EFAULT; 791 break; 792 } 793 794 status = iomap_write_begin(iter, pos, bytes, &folio); 795 if (unlikely(status)) 796 break; 797 if (iter->iomap.flags & IOMAP_F_STALE) 798 break; 799 800 page = folio_file_page(folio, pos >> PAGE_SHIFT); 801 if (mapping_writably_mapped(mapping)) 802 flush_dcache_page(page); 803 804 copied = copy_page_from_iter_atomic(page, offset, bytes, i); 805 806 status = iomap_write_end(iter, pos, bytes, copied, folio); 807 808 if (unlikely(copied != status)) 809 iov_iter_revert(i, copied - status); 810 811 cond_resched(); 812 if (unlikely(status == 0)) { 813 /* 814 * A short copy made iomap_write_end() reject the 815 * thing entirely. Might be memory poisoning 816 * halfway through, might be a race with munmap, 817 * might be severe memory pressure. 
818 */ 819 if (copied) 820 bytes = copied; 821 goto again; 822 } 823 pos += status; 824 written += status; 825 length -= status; 826 } while (iov_iter_count(i) && length); 827 828 if (status == -EAGAIN) { 829 iov_iter_revert(i, written); 830 return -EAGAIN; 831 } 832 return written ? written : status; 833 } 834 835 ssize_t 836 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i, 837 const struct iomap_ops *ops) 838 { 839 struct iomap_iter iter = { 840 .inode = iocb->ki_filp->f_mapping->host, 841 .pos = iocb->ki_pos, 842 .len = iov_iter_count(i), 843 .flags = IOMAP_WRITE, 844 }; 845 int ret; 846 847 if (iocb->ki_flags & IOCB_NOWAIT) 848 iter.flags |= IOMAP_NOWAIT; 849 850 while ((ret = iomap_iter(&iter, ops)) > 0) 851 iter.processed = iomap_write_iter(&iter, i); 852 if (iter.pos == iocb->ki_pos) 853 return ret; 854 return iter.pos - iocb->ki_pos; 855 } 856 EXPORT_SYMBOL_GPL(iomap_file_buffered_write); 857 858 /* 859 * Scan the data range passed to us for dirty page cache folios. If we find a 860 * dirty folio, punch out the preceding range and update the offset from which 861 * the next punch will start. 862 * 863 * We can punch out storage reservations under clean pages because they either 864 * contain data that has been written back - in which case the delalloc punch 865 * over that range is a no-op - or they were instantiated by read faults, in which case they 866 * contain zeroes and we can remove the delalloc backing range and any new 867 * writes to those pages will do the normal hole filling operation... 868 * 869 * This makes the logic simple: we only need to keep the delalloc extents 870 * over the dirty ranges of the page cache. 871 * 872 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to 873 * simplify range iterations. 874 */ 875 static int iomap_write_delalloc_scan(struct inode *inode, 876 loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte, 877 int (*punch)(struct inode *inode, loff_t offset, loff_t length)) 878 { 879 while (start_byte < end_byte) { 880 struct folio *folio; 881 882 /* grab locked page */ 883 folio = filemap_lock_folio(inode->i_mapping, 884 start_byte >> PAGE_SHIFT); 885 if (!folio) { 886 start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) + 887 PAGE_SIZE; 888 continue; 889 } 890 891 /* if dirty, punch up to offset */ 892 if (folio_test_dirty(folio)) { 893 if (start_byte > *punch_start_byte) { 894 int error; 895 896 error = punch(inode, *punch_start_byte, 897 start_byte - *punch_start_byte); 898 if (error) { 899 folio_unlock(folio); 900 folio_put(folio); 901 return error; 902 } 903 } 904 905 /* 906 * Make sure the next punch start is correctly bound to 907 * the end of this data range, not the end of the folio. 908 */ 909 *punch_start_byte = min_t(loff_t, end_byte, 910 folio_next_index(folio) << PAGE_SHIFT); 911 } 912 913 /* move offset to start of next folio in range */ 914 start_byte = folio_next_index(folio) << PAGE_SHIFT; 915 folio_unlock(folio); 916 folio_put(folio); 917 } 918 return 0; 919 } 920 921 /* 922 * Punch out all the delalloc blocks in the range given except for those that 923 * have dirty data still pending in the page cache - those are going to be 924 * written and so must still retain the delalloc backing for writeback. 925 * 926 * As we are scanning the page cache for data, we don't need to reimplement the 927 * wheel - mapping_seek_hole_data() does exactly what we need to identify the 928 * start and end of data ranges correctly even for sub-folio block sizes.
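 * For example (offsets are illustrative only): with cached data over
 * [4k, 12k) and [20k, 24k) inside a scan range of [0, 32k), SEEK_DATA from 0
 * returns 4k and SEEK_HOLE from there returns 12k, so we scan [4k, 12k);
 * SEEK_DATA from 12k returns 20k and SEEK_HOLE returns 24k, so we scan
 * [20k, 24k); the next SEEK_DATA finds nothing further and the walk ends.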
This 929 * byte range based iteration is especially convenient because it means we 930 * don't have to care about variable size folios, nor where the start or end of 931 * the data range lies within a folio, if they lie within the same folio or even 932 * if there are multiple discontiguous data ranges within the folio. 933 * 934 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so 935 * can return data ranges that exist in the cache beyond EOF. e.g. a page fault 936 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to 937 * date. A write page fault can then mark it dirty. If we then fail a write() 938 * beyond EOF into that up to date cached range, we allocate a delalloc block 939 * beyond EOF and then have to punch it out. Because the range is up to date, 940 * mapping_seek_hole_data() will return it, and we will skip the punch because 941 * the folio is dirty. This is incorrect - we always need to punch out delalloc 942 * beyond EOF in this case as writeback will never write back and convert that 943 * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF, 944 * resulting in always punching out the range from the EOF to the end of the 945 * range the iomap spans. 946 * 947 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because that 948 * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA 949 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte) 950 * returns the end of the data range (data_end). Using closed intervals would 951 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose 952 * the code to subtle off-by-one bugs.... 953 */ 954 static int iomap_write_delalloc_release(struct inode *inode, 955 loff_t start_byte, loff_t end_byte, 956 int (*punch)(struct inode *inode, loff_t pos, loff_t length)) 957 { 958 loff_t punch_start_byte = start_byte; 959 loff_t scan_end_byte = min(i_size_read(inode), end_byte); 960 int error = 0; 961 962 /* 963 * Lock the mapping to avoid races with page faults re-instantiating 964 * folios and dirtying them via ->page_mkwrite whilst we walk the 965 * cache and perform delalloc extent removal. Failing to do this can 966 * leave dirty pages with no space reservation in the cache. 967 */ 968 filemap_invalidate_lock(inode->i_mapping); 969 while (start_byte < scan_end_byte) { 970 loff_t data_end; 971 972 start_byte = mapping_seek_hole_data(inode->i_mapping, 973 start_byte, scan_end_byte, SEEK_DATA); 974 /* 975 * If there is no more data to scan, all that is left is to 976 * punch out the remaining range. 977 */ 978 if (start_byte == -ENXIO || start_byte == scan_end_byte) 979 break; 980 if (start_byte < 0) { 981 error = start_byte; 982 goto out_unlock; 983 } 984 WARN_ON_ONCE(start_byte < punch_start_byte); 985 WARN_ON_ONCE(start_byte > scan_end_byte); 986 987 /* 988 * We find the end of this contiguous cached data range by 989 * seeking from start_byte to the beginning of the next hole. 990 */ 991 data_end = mapping_seek_hole_data(inode->i_mapping, start_byte, 992 scan_end_byte, SEEK_HOLE); 993 if (data_end < 0) { 994 error = data_end; 995 goto out_unlock; 996 } 997 WARN_ON_ONCE(data_end <= start_byte); 998 WARN_ON_ONCE(data_end > scan_end_byte); 999 1000 error = iomap_write_delalloc_scan(inode, &punch_start_byte, 1001 start_byte, data_end, punch); 1002 if (error) 1003 goto out_unlock; 1004 1005 /* The next data search starts at the end of this one.
*/ 1006 start_byte = data_end; 1007 } 1008 1009 if (punch_start_byte < end_byte) 1010 error = punch(inode, punch_start_byte, 1011 end_byte - punch_start_byte); 1012 out_unlock: 1013 filemap_invalidate_unlock(inode->i_mapping); 1014 return error; 1015 } 1016 1017 /* 1018 * When a short write occurs, the filesystem may need to remove reserved space 1019 * that was allocated in ->iomap_begin from its ->iomap_end method. For 1020 * filesystems that use delayed allocation, we need to punch out delalloc 1021 * extents from the range that are not dirty in the page cache. As the write can 1022 * race with page faults, there can be dirty pages over the delalloc extent 1023 * outside the range of a short write but still within the delalloc extent 1024 * allocated for this iomap. 1025 * 1026 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to 1027 * simplify range iterations. 1028 * 1029 * The punch() callback *must* only punch delalloc extents in the range passed 1030 * to it. It must skip over all other types of extents in the range and leave 1031 * them completely unchanged. It must do this punch atomically with respect to 1032 * other extent modifications. 1033 * 1034 * The punch() callback may be called with a folio locked to prevent writeback 1035 * extent allocation racing at the edge of the range we are currently punching. 1036 * The locked folio may or may not cover the range being punched, so it is not 1037 * safe for the punch() callback to lock folios itself. 1038 * 1039 * Lock order is: 1040 * 1041 * inode->i_rwsem (shared or exclusive) 1042 * inode->i_mapping->invalidate_lock (exclusive) 1043 * folio_lock() 1044 * ->punch 1045 * internal filesystem allocation lock 1046 */ 1047 int iomap_file_buffered_write_punch_delalloc(struct inode *inode, 1048 struct iomap *iomap, loff_t pos, loff_t length, 1049 ssize_t written, 1050 int (*punch)(struct inode *inode, loff_t pos, loff_t length)) 1051 { 1052 loff_t start_byte; 1053 loff_t end_byte; 1054 int blocksize = i_blocksize(inode); 1055 1056 if (iomap->type != IOMAP_DELALLOC) 1057 return 0; 1058 1059 /* If we didn't reserve the blocks, we're not allowed to punch them. */ 1060 if (!(iomap->flags & IOMAP_F_NEW)) 1061 return 0; 1062 1063 /* 1064 * start_byte refers to the first unused block after a short write. If 1065 * nothing was written, round offset down to point at the first block in 1066 * the range.
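 *
 * Worked example with an assumed 4096-byte block size: a write asked for
 * pos = 5000, length = 20000 but only wrote 3000 bytes, so start_byte =
 * round_up(8000, 4096) = 8192 and end_byte = round_up(25000, 4096) = 28672;
 * delalloc blocks in [8192, 28672) not covered by dirty page cache are then
 * punched out. Had nothing been written, start_byte would instead be
 * round_down(5000, 4096) = 4096.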
1067 */ 1068 if (unlikely(!written)) 1069 start_byte = round_down(pos, blocksize); 1070 else 1071 start_byte = round_up(pos + written, blocksize); 1072 end_byte = round_up(pos + length, blocksize); 1073 1074 /* Nothing to do if we've written the entire delalloc extent */ 1075 if (start_byte >= end_byte) 1076 return 0; 1077 1078 return iomap_write_delalloc_release(inode, start_byte, end_byte, 1079 punch); 1080 } 1081 EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc); 1082 1083 static loff_t iomap_unshare_iter(struct iomap_iter *iter) 1084 { 1085 struct iomap *iomap = &iter->iomap; 1086 const struct iomap *srcmap = iomap_iter_srcmap(iter); 1087 loff_t pos = iter->pos; 1088 loff_t length = iomap_length(iter); 1089 long status = 0; 1090 loff_t written = 0; 1091 1092 /* don't bother with blocks that are not shared to start with */ 1093 if (!(iomap->flags & IOMAP_F_SHARED)) 1094 return length; 1095 /* don't bother with holes or unwritten extents */ 1096 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) 1097 return length; 1098 1099 do { 1100 unsigned long offset = offset_in_page(pos); 1101 unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length); 1102 struct folio *folio; 1103 1104 status = iomap_write_begin(iter, pos, bytes, &folio); 1105 if (unlikely(status)) 1106 return status; 1107 if (iter->iomap.flags & IOMAP_F_STALE) 1108 break; 1109 1110 status = iomap_write_end(iter, pos, bytes, bytes, folio); 1111 if (WARN_ON_ONCE(status == 0)) 1112 return -EIO; 1113 1114 cond_resched(); 1115 1116 pos += status; 1117 written += status; 1118 length -= status; 1119 1120 balance_dirty_pages_ratelimited(iter->inode->i_mapping); 1121 } while (length); 1122 1123 return written; 1124 } 1125 1126 int 1127 iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, 1128 const struct iomap_ops *ops) 1129 { 1130 struct iomap_iter iter = { 1131 .inode = inode, 1132 .pos = pos, 1133 .len = len, 1134 .flags = IOMAP_WRITE | IOMAP_UNSHARE, 1135 }; 1136 int ret; 1137 1138 while ((ret = iomap_iter(&iter, ops)) > 0) 1139 iter.processed = iomap_unshare_iter(&iter); 1140 return ret; 1141 } 1142 EXPORT_SYMBOL_GPL(iomap_file_unshare); 1143 1144 static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) 1145 { 1146 const struct iomap *srcmap = iomap_iter_srcmap(iter); 1147 loff_t pos = iter->pos; 1148 loff_t length = iomap_length(iter); 1149 loff_t written = 0; 1150 1151 /* already zeroed? we're done. 
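 * Holes and unwritten extents already read back as zeroes, so there is
 * nothing to zero in the page cache for them; returning the full length
 * simply advances the iterator past the extent. For example, a truncate to
 * a mid-block offset ends up in iomap_truncate_page() below, and if the
 * block straddling the new EOF is a hole or unwritten it is skipped here.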
*/ 1152 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) 1153 return length; 1154 1155 do { 1156 struct folio *folio; 1157 int status; 1158 size_t offset; 1159 size_t bytes = min_t(u64, SIZE_MAX, length); 1160 1161 status = iomap_write_begin(iter, pos, bytes, &folio); 1162 if (status) 1163 return status; 1164 if (iter->iomap.flags & IOMAP_F_STALE) 1165 break; 1166 1167 offset = offset_in_folio(folio, pos); 1168 if (bytes > folio_size(folio) - offset) 1169 bytes = folio_size(folio) - offset; 1170 1171 folio_zero_range(folio, offset, bytes); 1172 folio_mark_accessed(folio); 1173 1174 bytes = iomap_write_end(iter, pos, bytes, bytes, folio); 1175 if (WARN_ON_ONCE(bytes == 0)) 1176 return -EIO; 1177 1178 pos += bytes; 1179 length -= bytes; 1180 written += bytes; 1181 } while (length > 0); 1182 1183 if (did_zero) 1184 *did_zero = true; 1185 return written; 1186 } 1187 1188 int 1189 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, 1190 const struct iomap_ops *ops) 1191 { 1192 struct iomap_iter iter = { 1193 .inode = inode, 1194 .pos = pos, 1195 .len = len, 1196 .flags = IOMAP_ZERO, 1197 }; 1198 int ret; 1199 1200 while ((ret = iomap_iter(&iter, ops)) > 0) 1201 iter.processed = iomap_zero_iter(&iter, did_zero); 1202 return ret; 1203 } 1204 EXPORT_SYMBOL_GPL(iomap_zero_range); 1205 1206 int 1207 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, 1208 const struct iomap_ops *ops) 1209 { 1210 unsigned int blocksize = i_blocksize(inode); 1211 unsigned int off = pos & (blocksize - 1); 1212 1213 /* Block boundary? Nothing to do */ 1214 if (!off) 1215 return 0; 1216 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops); 1217 } 1218 EXPORT_SYMBOL_GPL(iomap_truncate_page); 1219 1220 static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter, 1221 struct folio *folio) 1222 { 1223 loff_t length = iomap_length(iter); 1224 int ret; 1225 1226 if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) { 1227 ret = __block_write_begin_int(folio, iter->pos, length, NULL, 1228 &iter->iomap); 1229 if (ret) 1230 return ret; 1231 block_commit_write(&folio->page, 0, length); 1232 } else { 1233 WARN_ON_ONCE(!folio_test_uptodate(folio)); 1234 folio_mark_dirty(folio); 1235 } 1236 1237 return length; 1238 } 1239 1240 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) 1241 { 1242 struct iomap_iter iter = { 1243 .inode = file_inode(vmf->vma->vm_file), 1244 .flags = IOMAP_WRITE | IOMAP_FAULT, 1245 }; 1246 struct folio *folio = page_folio(vmf->page); 1247 ssize_t ret; 1248 1249 folio_lock(folio); 1250 ret = folio_mkwrite_check_truncate(folio, iter.inode); 1251 if (ret < 0) 1252 goto out_unlock; 1253 iter.pos = folio_pos(folio); 1254 iter.len = ret; 1255 while ((ret = iomap_iter(&iter, ops)) > 0) 1256 iter.processed = iomap_folio_mkwrite_iter(&iter, folio); 1257 1258 if (ret < 0) 1259 goto out_unlock; 1260 folio_wait_stable(folio); 1261 return VM_FAULT_LOCKED; 1262 out_unlock: 1263 folio_unlock(folio); 1264 return block_page_mkwrite_return(ret); 1265 } 1266 EXPORT_SYMBOL_GPL(iomap_page_mkwrite); 1267 1268 static void iomap_finish_folio_write(struct inode *inode, struct folio *folio, 1269 size_t len, int error) 1270 { 1271 struct iomap_page *iop = to_iomap_page(folio); 1272 1273 if (error) { 1274 folio_set_error(folio); 1275 mapping_set_error(inode->i_mapping, error); 1276 } 1277 1278 WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop); 1279 WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0); 1280 1281 if (!iop 
|| atomic_sub_and_test(len, &iop->write_bytes_pending)) 1282 folio_end_writeback(folio); 1283 } 1284 1285 /* 1286 * We're now finished for good with this ioend structure. Update the page 1287 * state, release holds on bios, and finally free up memory. Do not use the 1288 * ioend after this. 1289 */ 1290 static u32 1291 iomap_finish_ioend(struct iomap_ioend *ioend, int error) 1292 { 1293 struct inode *inode = ioend->io_inode; 1294 struct bio *bio = &ioend->io_inline_bio; 1295 struct bio *last = ioend->io_bio, *next; 1296 u64 start = bio->bi_iter.bi_sector; 1297 loff_t offset = ioend->io_offset; 1298 bool quiet = bio_flagged(bio, BIO_QUIET); 1299 u32 folio_count = 0; 1300 1301 for (bio = &ioend->io_inline_bio; bio; bio = next) { 1302 struct folio_iter fi; 1303 1304 /* 1305 * For the last bio, bi_private points to the ioend, so we 1306 * need to explicitly end the iteration here. 1307 */ 1308 if (bio == last) 1309 next = NULL; 1310 else 1311 next = bio->bi_private; 1312 1313 /* walk all folios in bio, ending page IO on them */ 1314 bio_for_each_folio_all(fi, bio) { 1315 iomap_finish_folio_write(inode, fi.folio, fi.length, 1316 error); 1317 folio_count++; 1318 } 1319 bio_put(bio); 1320 } 1321 /* The ioend has been freed by bio_put() */ 1322 1323 if (unlikely(error && !quiet)) { 1324 printk_ratelimited(KERN_ERR 1325 "%s: writeback error on inode %lu, offset %lld, sector %llu", 1326 inode->i_sb->s_id, inode->i_ino, offset, start); 1327 } 1328 return folio_count; 1329 } 1330 1331 /* 1332 * Ioend completion routine for merged bios. This can only be called from task 1333 * contexts as merged ioends can be of unbound length. Hence we have to break up 1334 * the writeback completions into manageable chunks to avoid long scheduler 1335 * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get 1336 * good batch processing throughput without creating adverse scheduler latency 1337 * conditions. 1338 */ 1339 void 1340 iomap_finish_ioends(struct iomap_ioend *ioend, int error) 1341 { 1342 struct list_head tmp; 1343 u32 completions; 1344 1345 might_sleep(); 1346 1347 list_replace_init(&ioend->io_list, &tmp); 1348 completions = iomap_finish_ioend(ioend, error); 1349 1350 while (!list_empty(&tmp)) { 1351 if (completions > IOEND_BATCH_SIZE * 8) { 1352 cond_resched(); 1353 completions = 0; 1354 } 1355 ioend = list_first_entry(&tmp, struct iomap_ioend, io_list); 1356 list_del_init(&ioend->io_list); 1357 completions += iomap_finish_ioend(ioend, error); 1358 } 1359 } 1360 EXPORT_SYMBOL_GPL(iomap_finish_ioends); 1361 1362 /* 1363 * We can merge two adjacent ioends if they have the same set of work to do. 1364 */ 1365 static bool 1366 iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next) 1367 { 1368 if (ioend->io_bio->bi_status != next->io_bio->bi_status) 1369 return false; 1370 if ((ioend->io_flags & IOMAP_F_SHARED) ^ 1371 (next->io_flags & IOMAP_F_SHARED)) 1372 return false; 1373 if ((ioend->io_type == IOMAP_UNWRITTEN) ^ 1374 (next->io_type == IOMAP_UNWRITTEN)) 1375 return false; 1376 if (ioend->io_offset + ioend->io_size != next->io_offset) 1377 return false; 1378 /* 1379 * Do not merge physically discontiguous ioends. The filesystem 1380 * completion functions will have to iterate the physical 1381 * discontiguities even if we merge the ioends at a logical level, so 1382 * we don't gain anything by merging physical discontiguities here. 
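 *
 * As a concrete (illustrative) example of the io_sector check below: an
 * ioend with io_sector S and io_size 1MB can only merge with a following
 * ioend whose io_offset equals this ioend's io_offset + 1MB and whose
 * io_sector is S + 2048 (1MB expressed in 512-byte sectors).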
1383 * 1384 * We cannot use bio->bi_iter.bi_sector here as it is modified during 1385 * submission so does not point to the start sector of the bio at 1386 * completion. 1387 */ 1388 if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector) 1389 return false; 1390 return true; 1391 } 1392 1393 void 1394 iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends) 1395 { 1396 struct iomap_ioend *next; 1397 1398 INIT_LIST_HEAD(&ioend->io_list); 1399 1400 while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend, 1401 io_list))) { 1402 if (!iomap_ioend_can_merge(ioend, next)) 1403 break; 1404 list_move_tail(&next->io_list, &ioend->io_list); 1405 ioend->io_size += next->io_size; 1406 } 1407 } 1408 EXPORT_SYMBOL_GPL(iomap_ioend_try_merge); 1409 1410 static int 1411 iomap_ioend_compare(void *priv, const struct list_head *a, 1412 const struct list_head *b) 1413 { 1414 struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list); 1415 struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list); 1416 1417 if (ia->io_offset < ib->io_offset) 1418 return -1; 1419 if (ia->io_offset > ib->io_offset) 1420 return 1; 1421 return 0; 1422 } 1423 1424 void 1425 iomap_sort_ioends(struct list_head *ioend_list) 1426 { 1427 list_sort(NULL, ioend_list, iomap_ioend_compare); 1428 } 1429 EXPORT_SYMBOL_GPL(iomap_sort_ioends); 1430 1431 static void iomap_writepage_end_bio(struct bio *bio) 1432 { 1433 struct iomap_ioend *ioend = bio->bi_private; 1434 1435 iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status)); 1436 } 1437 1438 /* 1439 * Submit the final bio for an ioend. 1440 * 1441 * If @error is non-zero, it means that we have a situation where some part of 1442 * the submission process has failed after we've marked pages for writeback 1443 * and unlocked them. In this situation, we need to fail the bio instead of 1444 * submitting it. This typically only happens on a filesystem shutdown. 1445 */ 1446 static int 1447 iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend, 1448 int error) 1449 { 1450 ioend->io_bio->bi_private = ioend; 1451 ioend->io_bio->bi_end_io = iomap_writepage_end_bio; 1452 1453 if (wpc->ops->prepare_ioend) 1454 error = wpc->ops->prepare_ioend(ioend, error); 1455 if (error) { 1456 /* 1457 * If we're failing the IO now, just mark the ioend with an 1458 * error and finish it. This will run IO completion immediately 1459 * as there is only one reference to the ioend at this point in 1460 * time. 
1461 */ 1462 ioend->io_bio->bi_status = errno_to_blk_status(error); 1463 bio_endio(ioend->io_bio); 1464 return error; 1465 } 1466 1467 submit_bio(ioend->io_bio); 1468 return 0; 1469 } 1470 1471 static struct iomap_ioend * 1472 iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, 1473 loff_t offset, sector_t sector, struct writeback_control *wbc) 1474 { 1475 struct iomap_ioend *ioend; 1476 struct bio *bio; 1477 1478 bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS, 1479 REQ_OP_WRITE | wbc_to_write_flags(wbc), 1480 GFP_NOFS, &iomap_ioend_bioset); 1481 bio->bi_iter.bi_sector = sector; 1482 wbc_init_bio(wbc, bio); 1483 1484 ioend = container_of(bio, struct iomap_ioend, io_inline_bio); 1485 INIT_LIST_HEAD(&ioend->io_list); 1486 ioend->io_type = wpc->iomap.type; 1487 ioend->io_flags = wpc->iomap.flags; 1488 ioend->io_inode = inode; 1489 ioend->io_size = 0; 1490 ioend->io_folios = 0; 1491 ioend->io_offset = offset; 1492 ioend->io_bio = bio; 1493 ioend->io_sector = sector; 1494 return ioend; 1495 } 1496 1497 /* 1498 * Allocate a new bio, and chain the old bio to the new one. 1499 * 1500 * Note that we have to perform the chaining in this unintuitive order 1501 * so that the bi_private linkage is set up in the right direction for the 1502 * traversal in iomap_finish_ioend(). 1503 */ 1504 static struct bio * 1505 iomap_chain_bio(struct bio *prev) 1506 { 1507 struct bio *new; 1508 1509 new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS); 1510 bio_clone_blkg_association(new, prev); 1511 new->bi_iter.bi_sector = bio_end_sector(prev); 1512 1513 bio_chain(prev, new); 1514 bio_get(prev); /* for iomap_finish_ioend */ 1515 submit_bio(prev); 1516 return new; 1517 } 1518 1519 static bool 1520 iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset, 1521 sector_t sector) 1522 { 1523 if ((wpc->iomap.flags & IOMAP_F_SHARED) != 1524 (wpc->ioend->io_flags & IOMAP_F_SHARED)) 1525 return false; 1526 if (wpc->iomap.type != wpc->ioend->io_type) 1527 return false; 1528 if (offset != wpc->ioend->io_offset + wpc->ioend->io_size) 1529 return false; 1530 if (sector != bio_end_sector(wpc->ioend->io_bio)) 1531 return false; 1532 /* 1533 * Limit ioend bio chain lengths to minimise IO completion latency. This 1534 * also prevents long tight loops ending page writeback on all the 1535 * folios in the ioend. 1536 */ 1537 if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE) 1538 return false; 1539 return true; 1540 } 1541 1542 /* 1543 * Test to see if we have an existing ioend structure that we could append to 1544 * first; otherwise finish off the current ioend and start another. 
1545 */ 1546 static void 1547 iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio, 1548 struct iomap_page *iop, struct iomap_writepage_ctx *wpc, 1549 struct writeback_control *wbc, struct list_head *iolist) 1550 { 1551 sector_t sector = iomap_sector(&wpc->iomap, pos); 1552 unsigned len = i_blocksize(inode); 1553 size_t poff = offset_in_folio(folio, pos); 1554 1555 if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) { 1556 if (wpc->ioend) 1557 list_add(&wpc->ioend->io_list, iolist); 1558 wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc); 1559 } 1560 1561 if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) { 1562 wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio); 1563 bio_add_folio(wpc->ioend->io_bio, folio, len, poff); 1564 } 1565 1566 if (iop) 1567 atomic_add(len, &iop->write_bytes_pending); 1568 wpc->ioend->io_size += len; 1569 wbc_account_cgroup_owner(wbc, &folio->page, len); 1570 } 1571 1572 /* 1573 * We implement an immediate ioend submission policy here to avoid needing to 1574 * chain multiple ioends and hence nest mempool allocations which can violate 1575 * the forward progress guarantees we need to provide. The current ioend we're 1576 * adding blocks to is cached in the writepage context, and if the new block 1577 * doesn't append to the cached ioend, it will create a new ioend and cache that 1578 * instead. 1579 * 1580 * If a new ioend is created and cached, the old ioend is returned and queued 1581 * locally for submission once the entire page is processed or an error has been 1582 * detected. While ioends are submitted immediately after they are completed, 1583 * batching optimisations are provided by higher level block plugging. 1584 * 1585 * At the end of a writeback pass, there will be a cached ioend remaining on the 1586 * writepage context that the caller will need to submit. 1587 */ 1588 static int 1589 iomap_writepage_map(struct iomap_writepage_ctx *wpc, 1590 struct writeback_control *wbc, struct inode *inode, 1591 struct folio *folio, u64 end_pos) 1592 { 1593 struct iomap_page *iop = iomap_page_create(inode, folio, 0); 1594 struct iomap_ioend *ioend, *next; 1595 unsigned len = i_blocksize(inode); 1596 unsigned nblocks = i_blocks_per_folio(inode, folio); 1597 u64 pos = folio_pos(folio); 1598 int error = 0, count = 0, i; 1599 LIST_HEAD(submit_list); 1600 1601 WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0); 1602 1603 /* 1604 * Walk through the folio to find areas to write back. If we 1605 * run off the end of the current map or find the current map 1606 * invalid, grab a new one. 1607 */ 1608 for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) { 1609 if (iop && !test_bit(i, iop->uptodate)) 1610 continue; 1611 1612 error = wpc->ops->map_blocks(wpc, inode, pos); 1613 if (error) 1614 break; 1615 trace_iomap_writepage_map(inode, &wpc->iomap); 1616 if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE)) 1617 continue; 1618 if (wpc->iomap.type == IOMAP_HOLE) 1619 continue; 1620 iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc, 1621 &submit_list); 1622 count++; 1623 } 1624 if (count) 1625 wpc->ioend->io_folios++; 1626 1627 WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list)); 1628 WARN_ON_ONCE(!folio_test_locked(folio)); 1629 WARN_ON_ONCE(folio_test_writeback(folio)); 1630 WARN_ON_ONCE(folio_test_dirty(folio)); 1631 1632 /* 1633 * We cannot cancel the ioend directly here on error. 
We may have 1634 * already set other pages under writeback and hence we have to run I/O 1635 * completion to mark the error state of the pages under writeback 1636 * appropriately. 1637 */ 1638 if (unlikely(error)) { 1639 /* 1640 * Let the filesystem know what portion of the current page 1641 * failed to map. If the page hasn't been added to ioend, it 1642 * won't be affected by I/O completion and we must unlock it 1643 * now. 1644 */ 1645 if (wpc->ops->discard_folio) 1646 wpc->ops->discard_folio(folio, pos); 1647 if (!count) { 1648 folio_unlock(folio); 1649 goto done; 1650 } 1651 } 1652 1653 folio_start_writeback(folio); 1654 folio_unlock(folio); 1655 1656 /* 1657 * Preserve the original error if there was one; catch 1658 * submission errors here and propagate into subsequent ioend 1659 * submissions. 1660 */ 1661 list_for_each_entry_safe(ioend, next, &submit_list, io_list) { 1662 int error2; 1663 1664 list_del_init(&ioend->io_list); 1665 error2 = iomap_submit_ioend(wpc, ioend, error); 1666 if (error2 && !error) 1667 error = error2; 1668 } 1669 1670 /* 1671 * We can end up here with no error and nothing to write only if we race 1672 * with a partial page truncate on a sub-page block sized filesystem. 1673 */ 1674 if (!count) 1675 folio_end_writeback(folio); 1676 done: 1677 mapping_set_error(inode->i_mapping, error); 1678 return error; 1679 } 1680 1681 /* 1682 * Write out a dirty page. 1683 * 1684 * For delalloc space on the page, we need to allocate space and flush it. 1685 * For unwritten space on the page, we need to start the conversion to 1686 * regular allocated space. 1687 */ 1688 static int iomap_do_writepage(struct folio *folio, 1689 struct writeback_control *wbc, void *data) 1690 { 1691 struct iomap_writepage_ctx *wpc = data; 1692 struct inode *inode = folio->mapping->host; 1693 u64 end_pos, isize; 1694 1695 trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio)); 1696 1697 /* 1698 * Refuse to write the folio out if we're called from reclaim context. 1699 * 1700 * This avoids stack overflows when called from deeply used stacks in 1701 * random callers for direct reclaim or memcg reclaim. We explicitly 1702 * allow reclaim from kswapd as the stack usage there is relatively low. 1703 * 1704 * This should never happen except in the case of a VM regression so 1705 * warn about it. 1706 */ 1707 if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == 1708 PF_MEMALLOC)) 1709 goto redirty; 1710 1711 /* 1712 * Is this folio beyond the end of the file? 1713 * 1714 * The folio index is less than the end_index, adjust the end_pos 1715 * to the highest offset that this folio should represent. 1716 * ----------------------------------------------------- 1717 * | file mapping | <EOF> | 1718 * ----------------------------------------------------- 1719 * | Page ... | Page N-2 | Page N-1 | Page N | | 1720 * ^--------------------------------^----------|-------- 1721 * | desired writeback range | see else | 1722 * ---------------------------------^------------------| 1723 */ 1724 isize = i_size_read(inode); 1725 end_pos = folio_pos(folio) + folio_size(folio); 1726 if (end_pos > isize) { 1727 /* 1728 * Check whether the page to write out is beyond or straddles 1729 * i_size or not. 1730 * ------------------------------------------------------- 1731 * | file mapping | <EOF> | 1732 * ------------------------------------------------------- 1733 * | Page ... 
| Page N-2 | Page N-1 | Page N | Beyond | 1734 * ^--------------------------------^-----------|--------- 1735 * | | Straddles | 1736 * ---------------------------------^-----------|--------| 1737 */ 1738 size_t poff = offset_in_folio(folio, isize); 1739 pgoff_t end_index = isize >> PAGE_SHIFT; 1740 1741 /* 1742 * Skip the page if it's fully outside i_size, e.g. 1743 * due to a truncate operation that's in progress. We've 1744 * cleaned this page and truncate will finish things off for 1745 * us. 1746 * 1747 * Note that the end_index is unsigned long. If the given 1748 * offset is greater than 16TB on a 32-bit system then if we 1749 * checked if the page is fully outside i_size with 1750 * "if (page->index >= end_index + 1)", "end_index + 1" would 1751 * overflow and evaluate to 0. Hence this page would be 1752 * redirtied and written out repeatedly, which would result in 1753 * an infinite loop; the user program performing this operation 1754 * would hang. Instead, we can detect this situation by 1755 * checking if the page is totally beyond i_size or if its 1756 * offset is just equal to the EOF. 1757 */ 1758 if (folio->index > end_index || 1759 (folio->index == end_index && poff == 0)) 1760 goto unlock; 1761 1762 /* 1763 * The page straddles i_size. It must be zeroed out on each 1764 * and every writepage invocation because it may be mmapped. 1765 * "A file is mapped in multiples of the page size. For a file 1766 * that is not a multiple of the page size, the remaining 1767 * memory is zeroed when mapped, and writes to that region are 1768 * not written out to the file." 1769 */ 1770 folio_zero_segment(folio, poff, folio_size(folio)); 1771 end_pos = isize; 1772 } 1773 1774 return iomap_writepage_map(wpc, wbc, inode, folio, end_pos); 1775 1776 redirty: 1777 folio_redirty_for_writepage(wbc, folio); 1778 unlock: 1779 folio_unlock(folio); 1780 return 0; 1781 } 1782 1783 int 1784 iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, 1785 struct iomap_writepage_ctx *wpc, 1786 const struct iomap_writeback_ops *ops) 1787 { 1788 int ret; 1789 1790 wpc->ops = ops; 1791 ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc); 1792 if (!wpc->ioend) 1793 return ret; 1794 return iomap_submit_ioend(wpc, wpc->ioend, ret); 1795 } 1796 EXPORT_SYMBOL_GPL(iomap_writepages); 1797 1798 static int __init iomap_init(void) 1799 { 1800 return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE), 1801 offsetof(struct iomap_ioend, io_inline_bio), 1802 BIOSET_NEED_BVECS); 1803 } 1804 fs_initcall(iomap_init); 1805
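
/*
 * Usage sketch (illustrative only; the "myfs" names are hypothetical and not
 * part of this file): a filesystem implements ->writepages by providing
 * iomap_writeback_ops, declaring a struct iomap_writepage_ctx, and calling
 * iomap_writepages():
 *
 *	static const struct iomap_writeback_ops myfs_writeback_ops = {
 *		.map_blocks	= myfs_map_blocks,
 *		.prepare_ioend	= myfs_prepare_ioend,
 *		.discard_folio	= myfs_discard_folio,
 *	};
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc, &myfs_writeback_ops);
 *	}
 *
 * Completion normally runs through iomap_finish_ioend(); a filesystem that
 * needs to do transactional work at I/O completion can take over the bio
 * end_io in its prepare_ioend hook and call iomap_finish_ioends() later from
 * task context.
 */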