// SPDX-License-Identifier: GPL-2.0
/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	Andrew Morton
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;
		page_endio(page, bio_op(bio),
			   blk_status_to_errno(bio->bi_status));
	}

	bio_put(bio);
}

static struct bio *mpage_bio_submit(struct bio *bio)
{
	bio->bi_end_io = mpage_end_io;
	guard_bio_eod(bio);
	submit_bio(bio);
	return NULL;
}

/*
 * support function for mpage_readahead.  The fs supplied get_block might
 * return an up to date buffer.  This is used to map that buffer into
 * the page, which allows read_folio to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
		int page_block)
{
	struct inode *inode = folio->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	head = folio_buffers(folio);
	if (!head) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the folio and the folio just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_SHIFT &&
		    buffer_uptodate(bh)) {
			folio_mark_uptodate(folio);
			return;
		}
		create_empty_buffers(&folio->page, i_blocksize(inode), 0);
		head = folio_buffers(folio);
	}

	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

struct mpage_readpage_args {
	struct bio *bio;
	struct folio *folio;
	unsigned int nr_pages;
	bool is_readahead;
	sector_t last_block_in_bio;
	struct buffer_head map_bh;
	unsigned long first_logical_block;
	get_block_t *get_block;
};

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructing the largest possible BIOs, submitting them for I/O
 * when the blocks are not contiguous on disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
{
	struct folio *folio = args->folio;
	struct inode *inode = folio->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *map_bh = &args->map_bh;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	blk_opf_t opf = REQ_OP_READ;
	unsigned nblocks;
	unsigned relative_block;
	gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);

	/* MAX_BUF_PER_PAGE, for example */
	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);

	if (args->is_readahead) {
		opf |= REQ_RAHEAD;
		gfp |= __GFP_NORETRY | __GFP_NOWARN;
	}

	if (folio_buffers(folio))
		goto confused;

	block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
	last_block = block_in_file + args->nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) &&
			block_in_file > args->first_logical_block &&
			block_in_file < (args->first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - args->first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this folio.
	 */
	map_bh->b_page = &folio->page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (args->get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			args->first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_folio copies the data
		 * we just collected from get_block into the folio's buffers
		 * so read_folio doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_folio(folio, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		folio_zero_segment(folio, first_hole << blkbits, PAGE_SIZE);
		if (first_hole == 0) {
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			goto out;
		}
	} else if (fully_mapped) {
		folio_set_mappedtodisk(folio);
	}

	/*
	 * This folio will go to BIO.  Do we need to send this BIO off first?
	 */
	if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
		args->bio = mpage_bio_submit(args->bio);

alloc_new:
	if (args->bio == NULL) {
		if (first_hole == blocks_per_page) {
			if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
								&folio->page))
				goto out;
		}
		args->bio = bio_alloc(bdev, bio_max_segs(args->nr_pages), opf,
				      gfp);
		if (args->bio == NULL)
			goto confused;
		args->bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
	}

	length = first_hole << blkbits;
	if (!bio_add_folio(args->bio, folio, length, 0)) {
		args->bio = mpage_bio_submit(args->bio);
		goto alloc_new;
	}

	relative_block = block_in_file - args->first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		args->bio = mpage_bio_submit(args->bio);
	else
		args->last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return args->bio;

confused:
	if (args->bio)
		args->bio = mpage_bio_submit(args->bio);
	if (!folio_test_uptodate(folio))
		block_read_full_folio(folio, args->get_block);
	else
		folio_unlock(folio);
	goto out;
}
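
/*
 * Note (a hedged sketch for reference, not part of the original file): the
 * get_block_t callback that do_mpage_readpage() drives has the form
 *
 *	int myfs_get_block(struct inode *inode, sector_t iblock,
 *			   struct buffer_head *bh_result, int create);
 *
 * where "myfs_get_block" is a hypothetical filesystem callback.  For a
 * mapped block it typically calls map_bh() to fill bh_result->b_bdev and
 * ->b_blocknr; for a hole it leaves the buffer unmapped.  It may map more
 * than the one requested block by growing bh_result->b_size, and it may set
 * the Boundary bit (set_buffer_boundary()) when mapping the following block
 * would require a metadata read - see the BH_Boundary discussion below.
 */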

/**
 * mpage_readahead - start reads against pages
 * @rac: Describes which pages to read.
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *
 *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
{
	struct folio *folio;
	struct mpage_readpage_args args = {
		.get_block = get_block,
		.is_readahead = true,
	};

	while ((folio = readahead_folio(rac))) {
		prefetchw(&folio->flags);
		args.folio = folio;
		args.nr_pages = readahead_count(rac);
		args.bio = do_mpage_readpage(&args);
	}
	if (args.bio)
		mpage_bio_submit(args.bio);
}
EXPORT_SYMBOL(mpage_readahead);

/*
 * This isn't called much at all
 */
int mpage_read_folio(struct folio *folio, get_block_t get_block)
{
	struct mpage_readpage_args args = {
		.folio = folio,
		.nr_pages = 1,
		.get_block = get_block,
	};

	args.bio = do_mpage_readpage(&args);
	if (args.bio)
		mpage_bio_submit(args.bio);
	return 0;
}
EXPORT_SYMBOL(mpage_read_folio);
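
/*
 * Example (an illustrative sketch, not part of the original file): a simple
 * block-based filesystem typically wires the two read helpers above into its
 * address_space_operations, supplying its own get_block callback.  The
 * "myfs_" names are hypothetical:
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return mpage_read_folio(folio, myfs_get_block);
 *	}
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		mpage_readahead(rac, myfs_get_block);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.read_folio	= myfs_read_folio,
 *		.readahead	= myfs_readahead,
 *	};
 */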

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */

struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
};

/*
 * We have our BIO, so we can now mark the buffers clean.  Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct page *page, unsigned first_unmapped)
{
	unsigned buffer_counter = 0;
	struct buffer_head *bh, *head;
	if (!page_has_buffers(page))
		return;
	head = page_buffers(page);
	bh = head;

	do {
		if (buffer_counter++ == first_unmapped)
			break;
		clear_buffer_dirty(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * we cannot drop the bh if the page is not uptodate, or a concurrent
	 * read_folio would fail to serialize with the bh and it would read
	 * from disk before we reach the platter.
	 */
	if (buffer_heads_over_limit && PageUptodate(page))
		try_to_free_buffers(page_folio(page));
}

/*
 * For situations where we want to clean all buffers attached to a page.
 * We don't need to calculate how many buffers are attached to the page,
 * we just need to specify a number larger than the maximum number of buffers.
 */
void clean_page_buffers(struct page *page)
{
	clean_buffers(page, ~0U);
}

static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
		      void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * block_dirty_folio -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_folio().  If this address_space is also
		 * using mpage_readahead then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (buffer_new(&map_bh))
			clean_bdev_bh_alias(&map_bh);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	end_index = i_size >> PAGE_SHIFT;
	if (page->index >= end_index) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		unsigned offset = i_size & (PAGE_SIZE - 1);

		if (page->index > end_index || !offset)
			goto confused;
		zero_user_segment(page, offset, PAGE_SIZE);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(bio);

alloc_new:
	if (bio == NULL) {
		if (first_unmapped == blocks_per_page) {
			if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
								page, wbc))
				goto out;
		}
		bio = bio_alloc(bdev, BIO_MAX_VECS,
				REQ_OP_WRITE | wbc_to_write_flags(wbc),
				GFP_NOFS);
		bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
		wbc_init_bio(wbc, bio);
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	wbc_account_cgroup_owner(wbc, page, PAGE_SIZE);
	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(bio);
		goto alloc_new;
	}

	clean_buffers(page, first_unmapped);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(bio);

	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	ret = block_write_full_page(page, mpd->get_block, wbc);
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct mpage_data mpd = {
		.get_block = get_block,
	};
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
	if (mpd.bio)
		mpage_bio_submit(mpd.bio);
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);
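
/*
 * Example (an illustrative sketch, not part of the original file): the write
 * side is normally hooked up through the writepages address_space operation,
 * again with a filesystem-supplied get_block.  The "myfs_" names are
 * hypothetical:
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *				   struct writeback_control *wbc)
 *	{
 *		return mpage_writepages(mapping, wbc, myfs_get_block);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.writepages	= myfs_writepages,
 *	};
 */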