// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations. Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the folio has dirty or writeback buffers. If all the buffers
 * are unlocked and clean then the folio_test_dirty information is stale. If
 * any of the buffers are locked, it is assumed they are locked for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Block until a buffer comes unlocked. This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
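 *
 * Illustrative sketch only (not part of the original source): a caller that
 * needs the contents to stay stable does not merely wait, it takes the lock
 * itself, roughly:
 *
 *	lock_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;
 *	... examine or modify bh->b_data ...
 *	unlock_buffer(bh);
 *
 * wait_on_buffer() alone only guarantees that the I/O which held the lock
 * at the time of the call has completed.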
119 */ 120 void __wait_on_buffer(struct buffer_head * bh) 121 { 122 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE); 123 } 124 EXPORT_SYMBOL(__wait_on_buffer); 125 126 static void buffer_io_error(struct buffer_head *bh, char *msg) 127 { 128 if (!test_bit(BH_Quiet, &bh->b_state)) 129 printk_ratelimited(KERN_ERR 130 "Buffer I/O error on dev %pg, logical block %llu%s\n", 131 bh->b_bdev, (unsigned long long)bh->b_blocknr, msg); 132 } 133 134 /* 135 * End-of-IO handler helper function which does not touch the bh after 136 * unlocking it. 137 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but 138 * a race there is benign: unlock_buffer() only use the bh's address for 139 * hashing after unlocking the buffer, so it doesn't actually touch the bh 140 * itself. 141 */ 142 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) 143 { 144 if (uptodate) { 145 set_buffer_uptodate(bh); 146 } else { 147 /* This happens, due to failed read-ahead attempts. */ 148 clear_buffer_uptodate(bh); 149 } 150 unlock_buffer(bh); 151 } 152 153 /* 154 * Default synchronous end-of-IO handler.. Just mark it up-to-date and 155 * unlock the buffer. 156 */ 157 void end_buffer_read_sync(struct buffer_head *bh, int uptodate) 158 { 159 __end_buffer_read_notouch(bh, uptodate); 160 put_bh(bh); 161 } 162 EXPORT_SYMBOL(end_buffer_read_sync); 163 164 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) 165 { 166 if (uptodate) { 167 set_buffer_uptodate(bh); 168 } else { 169 buffer_io_error(bh, ", lost sync page write"); 170 mark_buffer_write_io_error(bh); 171 clear_buffer_uptodate(bh); 172 } 173 unlock_buffer(bh); 174 put_bh(bh); 175 } 176 EXPORT_SYMBOL(end_buffer_write_sync); 177 178 /* 179 * Various filesystems appear to want __find_get_block to be non-blocking. 180 * But it's the page lock which protects the buffers. To get around this, 181 * we get exclusion from try_to_free_buffers with the blockdev mapping's 182 * private_lock. 183 * 184 * Hack idea: for the blockdev mapping, private_lock contention 185 * may be quite high. This code could TryLock the page, and if that 186 * succeeds, there is no need to take private_lock. 187 */ 188 static struct buffer_head * 189 __find_get_block_slow(struct block_device *bdev, sector_t block) 190 { 191 struct inode *bd_inode = bdev->bd_inode; 192 struct address_space *bd_mapping = bd_inode->i_mapping; 193 struct buffer_head *ret = NULL; 194 pgoff_t index; 195 struct buffer_head *bh; 196 struct buffer_head *head; 197 struct folio *folio; 198 int all_mapped = 1; 199 static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1); 200 201 index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); 202 folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0); 203 if (IS_ERR(folio)) 204 goto out; 205 206 spin_lock(&bd_mapping->private_lock); 207 head = folio_buffers(folio); 208 if (!head) 209 goto out_unlock; 210 bh = head; 211 do { 212 if (!buffer_mapped(bh)) 213 all_mapped = 0; 214 else if (bh->b_blocknr == block) { 215 ret = bh; 216 get_bh(bh); 217 goto out_unlock; 218 } 219 bh = bh->b_this_page; 220 } while (bh != head); 221 222 /* we might be here because some of the buffers on this page are 223 * not mapped. This is due to various races between 224 * file io on the block device and getblk. It gets dealt with 225 * elsewhere, don't buffer_error if we had some unmapped buffers 226 */ 227 ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE); 228 if (all_mapped && __ratelimit(&last_warned)) { 229 printk("__find_get_block_slow() failed. 
block=%llu, " 230 "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, " 231 "device %pg blocksize: %d\n", 232 (unsigned long long)block, 233 (unsigned long long)bh->b_blocknr, 234 bh->b_state, bh->b_size, bdev, 235 1 << bd_inode->i_blkbits); 236 } 237 out_unlock: 238 spin_unlock(&bd_mapping->private_lock); 239 folio_put(folio); 240 out: 241 return ret; 242 } 243 244 static void end_buffer_async_read(struct buffer_head *bh, int uptodate) 245 { 246 unsigned long flags; 247 struct buffer_head *first; 248 struct buffer_head *tmp; 249 struct folio *folio; 250 int folio_uptodate = 1; 251 252 BUG_ON(!buffer_async_read(bh)); 253 254 folio = bh->b_folio; 255 if (uptodate) { 256 set_buffer_uptodate(bh); 257 } else { 258 clear_buffer_uptodate(bh); 259 buffer_io_error(bh, ", async page read"); 260 folio_set_error(folio); 261 } 262 263 /* 264 * Be _very_ careful from here on. Bad things can happen if 265 * two buffer heads end IO at almost the same time and both 266 * decide that the page is now completely done. 267 */ 268 first = folio_buffers(folio); 269 spin_lock_irqsave(&first->b_uptodate_lock, flags); 270 clear_buffer_async_read(bh); 271 unlock_buffer(bh); 272 tmp = bh; 273 do { 274 if (!buffer_uptodate(tmp)) 275 folio_uptodate = 0; 276 if (buffer_async_read(tmp)) { 277 BUG_ON(!buffer_locked(tmp)); 278 goto still_busy; 279 } 280 tmp = tmp->b_this_page; 281 } while (tmp != bh); 282 spin_unlock_irqrestore(&first->b_uptodate_lock, flags); 283 284 /* 285 * If all of the buffers are uptodate then we can set the page 286 * uptodate. 287 */ 288 if (folio_uptodate) 289 folio_mark_uptodate(folio); 290 folio_unlock(folio); 291 return; 292 293 still_busy: 294 spin_unlock_irqrestore(&first->b_uptodate_lock, flags); 295 return; 296 } 297 298 struct postprocess_bh_ctx { 299 struct work_struct work; 300 struct buffer_head *bh; 301 }; 302 303 static void verify_bh(struct work_struct *work) 304 { 305 struct postprocess_bh_ctx *ctx = 306 container_of(work, struct postprocess_bh_ctx, work); 307 struct buffer_head *bh = ctx->bh; 308 bool valid; 309 310 valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh)); 311 end_buffer_async_read(bh, valid); 312 kfree(ctx); 313 } 314 315 static bool need_fsverity(struct buffer_head *bh) 316 { 317 struct folio *folio = bh->b_folio; 318 struct inode *inode = folio->mapping->host; 319 320 return fsverity_active(inode) && 321 /* needed by ext4 */ 322 folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE); 323 } 324 325 static void decrypt_bh(struct work_struct *work) 326 { 327 struct postprocess_bh_ctx *ctx = 328 container_of(work, struct postprocess_bh_ctx, work); 329 struct buffer_head *bh = ctx->bh; 330 int err; 331 332 err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size, 333 bh_offset(bh)); 334 if (err == 0 && need_fsverity(bh)) { 335 /* 336 * We use different work queues for decryption and for verity 337 * because verity may require reading metadata pages that need 338 * decryption, and we shouldn't recurse to the same workqueue. 339 */ 340 INIT_WORK(&ctx->work, verify_bh); 341 fsverity_enqueue_verify_work(&ctx->work); 342 return; 343 } 344 end_buffer_async_read(bh, err == 0); 345 kfree(ctx); 346 } 347 348 /* 349 * I/O completion handler for block_read_full_folio() - pages 350 * which come unlocked at the end of I/O. 
351 */ 352 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate) 353 { 354 struct inode *inode = bh->b_folio->mapping->host; 355 bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode); 356 bool verify = need_fsverity(bh); 357 358 /* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */ 359 if (uptodate && (decrypt || verify)) { 360 struct postprocess_bh_ctx *ctx = 361 kmalloc(sizeof(*ctx), GFP_ATOMIC); 362 363 if (ctx) { 364 ctx->bh = bh; 365 if (decrypt) { 366 INIT_WORK(&ctx->work, decrypt_bh); 367 fscrypt_enqueue_decrypt_work(&ctx->work); 368 } else { 369 INIT_WORK(&ctx->work, verify_bh); 370 fsverity_enqueue_verify_work(&ctx->work); 371 } 372 return; 373 } 374 uptodate = 0; 375 } 376 end_buffer_async_read(bh, uptodate); 377 } 378 379 /* 380 * Completion handler for block_write_full_page() - pages which are unlocked 381 * during I/O, and which have PageWriteback cleared upon I/O completion. 382 */ 383 void end_buffer_async_write(struct buffer_head *bh, int uptodate) 384 { 385 unsigned long flags; 386 struct buffer_head *first; 387 struct buffer_head *tmp; 388 struct folio *folio; 389 390 BUG_ON(!buffer_async_write(bh)); 391 392 folio = bh->b_folio; 393 if (uptodate) { 394 set_buffer_uptodate(bh); 395 } else { 396 buffer_io_error(bh, ", lost async page write"); 397 mark_buffer_write_io_error(bh); 398 clear_buffer_uptodate(bh); 399 folio_set_error(folio); 400 } 401 402 first = folio_buffers(folio); 403 spin_lock_irqsave(&first->b_uptodate_lock, flags); 404 405 clear_buffer_async_write(bh); 406 unlock_buffer(bh); 407 tmp = bh->b_this_page; 408 while (tmp != bh) { 409 if (buffer_async_write(tmp)) { 410 BUG_ON(!buffer_locked(tmp)); 411 goto still_busy; 412 } 413 tmp = tmp->b_this_page; 414 } 415 spin_unlock_irqrestore(&first->b_uptodate_lock, flags); 416 folio_end_writeback(folio); 417 return; 418 419 still_busy: 420 spin_unlock_irqrestore(&first->b_uptodate_lock, flags); 421 return; 422 } 423 EXPORT_SYMBOL(end_buffer_async_write); 424 425 /* 426 * If a page's buffers are under async readin (end_buffer_async_read 427 * completion) then there is a possibility that another thread of 428 * control could lock one of the buffers after it has completed 429 * but while some of the other buffers have not completed. This 430 * locked buffer would confuse end_buffer_async_read() into not unlocking 431 * the page. So the absence of BH_Async_Read tells end_buffer_async_read() 432 * that this buffer is not under async I/O. 433 * 434 * The page comes unlocked when it has no locked buffer_async buffers 435 * left. 436 * 437 * PageLocked prevents anyone starting new async I/O reads any of 438 * the buffers. 439 * 440 * PageWriteback is used to prevent simultaneous writeout of the same 441 * page. 442 * 443 * PageLocked prevents anyone from starting writeback of a page which is 444 * under read I/O (PageWriteback is only ever set against a locked page). 445 */ 446 static void mark_buffer_async_read(struct buffer_head *bh) 447 { 448 bh->b_end_io = end_buffer_async_read_io; 449 set_buffer_async_read(bh); 450 } 451 452 static void mark_buffer_async_write_endio(struct buffer_head *bh, 453 bh_end_io_t *handler) 454 { 455 bh->b_end_io = handler; 456 set_buffer_async_write(bh); 457 } 458 459 void mark_buffer_async_write(struct buffer_head *bh) 460 { 461 mark_buffer_async_write_endio(bh, end_buffer_async_write); 462 } 463 EXPORT_SYMBOL(mark_buffer_async_write); 464 465 466 /* 467 * fs/buffer.c contains helper functions for buffer-backed address space's 468 * fsync functions. 
A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync(). For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed. But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers. Which is different from the address_space
 * against which the buffers are listed. So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list! In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want. The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
 * filesystems should do that. invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
 * take an address_space, not an inode. And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list. Because if the buffer is on a list,
 * it *must* already be on the right one. If not, the filesystem is being
 * silly. This will save a ton of locking. But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate). That requires careful auditing of all
 * filesystems (do it inside bforget()). It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io. It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion. Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
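 *
 * Illustrative sketch only (not part of the original source), assuming a
 * filesystem that queues dependent metadata buffers on ->private_list
 * ("blockdev_mapping" below is a stand-in for the backing blockdev's
 * address_space):
 *
 *	mark_buffer_dirty(bh);
 *	write_dirty_buffer(bh, REQ_SYNC);	- queue the write immediately
 *	...
 *	err = osync_buffers_list(&blockdev_mapping->private_lock,
 *				 &inode->i_mapping->private_list);
 *
 * External callers get the same effect via sync_mapping_buffers(), which
 * finishes with an osync pass after writing out the dirty buffers.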
539 */ 540 static int osync_buffers_list(spinlock_t *lock, struct list_head *list) 541 { 542 struct buffer_head *bh; 543 struct list_head *p; 544 int err = 0; 545 546 spin_lock(lock); 547 repeat: 548 list_for_each_prev(p, list) { 549 bh = BH_ENTRY(p); 550 if (buffer_locked(bh)) { 551 get_bh(bh); 552 spin_unlock(lock); 553 wait_on_buffer(bh); 554 if (!buffer_uptodate(bh)) 555 err = -EIO; 556 brelse(bh); 557 spin_lock(lock); 558 goto repeat; 559 } 560 } 561 spin_unlock(lock); 562 return err; 563 } 564 565 void emergency_thaw_bdev(struct super_block *sb) 566 { 567 while (sb->s_bdev && !thaw_bdev(sb->s_bdev)) 568 printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev); 569 } 570 571 /** 572 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers 573 * @mapping: the mapping which wants those buffers written 574 * 575 * Starts I/O against the buffers at mapping->private_list, and waits upon 576 * that I/O. 577 * 578 * Basically, this is a convenience function for fsync(). 579 * @mapping is a file or directory which needs those buffers to be written for 580 * a successful fsync(). 581 */ 582 int sync_mapping_buffers(struct address_space *mapping) 583 { 584 struct address_space *buffer_mapping = mapping->private_data; 585 586 if (buffer_mapping == NULL || list_empty(&mapping->private_list)) 587 return 0; 588 589 return fsync_buffers_list(&buffer_mapping->private_lock, 590 &mapping->private_list); 591 } 592 EXPORT_SYMBOL(sync_mapping_buffers); 593 594 /** 595 * generic_buffers_fsync_noflush - generic buffer fsync implementation 596 * for simple filesystems with no inode lock 597 * 598 * @file: file to synchronize 599 * @start: start offset in bytes 600 * @end: end offset in bytes (inclusive) 601 * @datasync: only synchronize essential metadata if true 602 * 603 * This is a generic implementation of the fsync method for simple 604 * filesystems which track all non-inode metadata in the buffers list 605 * hanging off the address_space structure. 606 */ 607 int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end, 608 bool datasync) 609 { 610 struct inode *inode = file->f_mapping->host; 611 int err; 612 int ret; 613 614 err = file_write_and_wait_range(file, start, end); 615 if (err) 616 return err; 617 618 ret = sync_mapping_buffers(inode->i_mapping); 619 if (!(inode->i_state & I_DIRTY_ALL)) 620 goto out; 621 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) 622 goto out; 623 624 err = sync_inode_metadata(inode, 1); 625 if (ret == 0) 626 ret = err; 627 628 out: 629 /* check and advance again to catch errors after syncing out buffers */ 630 err = file_check_and_advance_wb_err(file); 631 if (ret == 0) 632 ret = err; 633 return ret; 634 } 635 EXPORT_SYMBOL(generic_buffers_fsync_noflush); 636 637 /** 638 * generic_buffers_fsync - generic buffer fsync implementation 639 * for simple filesystems with no inode lock 640 * 641 * @file: file to synchronize 642 * @start: start offset in bytes 643 * @end: end offset in bytes (inclusive) 644 * @datasync: only synchronize essential metadata if true 645 * 646 * This is a generic implementation of the fsync method for simple 647 * filesystems which track all non-inode metadata in the buffers list 648 * hanging off the address_space structure. This also makes sure that 649 * a device cache flush operation is called at the end. 
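 *
 * Illustrative sketch only (not part of the original source): because the
 * ->fsync file operation takes an int for @datasync, a filesystem normally
 * calls this from a thin wrapper (the name "example_fsync" is made up):
 *
 *	static int example_fsync(struct file *file, loff_t start, loff_t end,
 *				 int datasync)
 *	{
 *		return generic_buffers_fsync(file, start, end, datasync);
 *	}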
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer. This means that the block at
 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
 * dirty, schedule it for IO. So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_folio->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->private_data) {
		mapping->private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking. It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers. If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied. There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness. That's fine. If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list. Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well. That's rather up to the
 * address_space though.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
745 */ 746 folio_memcg_lock(folio); 747 newly_dirty = !folio_test_set_dirty(folio); 748 spin_unlock(&mapping->private_lock); 749 750 if (newly_dirty) 751 __folio_mark_dirty(folio, mapping, 1); 752 753 folio_memcg_unlock(folio); 754 755 if (newly_dirty) 756 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 757 758 return newly_dirty; 759 } 760 EXPORT_SYMBOL(block_dirty_folio); 761 762 /* 763 * Write out and wait upon a list of buffers. 764 * 765 * We have conflicting pressures: we want to make sure that all 766 * initially dirty buffers get waited on, but that any subsequently 767 * dirtied buffers don't. After all, we don't want fsync to last 768 * forever if somebody is actively writing to the file. 769 * 770 * Do this in two main stages: first we copy dirty buffers to a 771 * temporary inode list, queueing the writes as we go. Then we clean 772 * up, waiting for those writes to complete. 773 * 774 * During this second stage, any subsequent updates to the file may end 775 * up refiling the buffer on the original inode's dirty list again, so 776 * there is a chance we will end up with a buffer queued for write but 777 * not yet completed on that list. So, as a final cleanup we go through 778 * the osync code to catch these locked, dirty buffers without requeuing 779 * any newly dirty buffers for write. 780 */ 781 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) 782 { 783 struct buffer_head *bh; 784 struct list_head tmp; 785 struct address_space *mapping; 786 int err = 0, err2; 787 struct blk_plug plug; 788 789 INIT_LIST_HEAD(&tmp); 790 blk_start_plug(&plug); 791 792 spin_lock(lock); 793 while (!list_empty(list)) { 794 bh = BH_ENTRY(list->next); 795 mapping = bh->b_assoc_map; 796 __remove_assoc_queue(bh); 797 /* Avoid race with mark_buffer_dirty_inode() which does 798 * a lockless check and we rely on seeing the dirty bit */ 799 smp_mb(); 800 if (buffer_dirty(bh) || buffer_locked(bh)) { 801 list_add(&bh->b_assoc_buffers, &tmp); 802 bh->b_assoc_map = mapping; 803 if (buffer_dirty(bh)) { 804 get_bh(bh); 805 spin_unlock(lock); 806 /* 807 * Ensure any pending I/O completes so that 808 * write_dirty_buffer() actually writes the 809 * current contents - it is a noop if I/O is 810 * still in flight on potentially older 811 * contents. 812 */ 813 write_dirty_buffer(bh, REQ_SYNC); 814 815 /* 816 * Kick off IO for the previous mapping. Note 817 * that we will not run the very last mapping, 818 * wait_on_buffer() will do that for us 819 * through sync_buffer(). 820 */ 821 brelse(bh); 822 spin_lock(lock); 823 } 824 } 825 } 826 827 spin_unlock(lock); 828 blk_finish_plug(&plug); 829 spin_lock(lock); 830 831 while (!list_empty(&tmp)) { 832 bh = BH_ENTRY(tmp.prev); 833 get_bh(bh); 834 mapping = bh->b_assoc_map; 835 __remove_assoc_queue(bh); 836 /* Avoid race with mark_buffer_dirty_inode() which does 837 * a lockless check and we rely on seeing the dirty bit */ 838 smp_mb(); 839 if (buffer_dirty(bh)) { 840 list_add(&bh->b_assoc_buffers, 841 &mapping->private_list); 842 bh->b_assoc_map = mapping; 843 } 844 spin_unlock(lock); 845 wait_on_buffer(bh); 846 if (!buffer_uptodate(bh)) 847 err = -EIO; 848 brelse(bh); 849 spin_lock(lock); 850 } 851 852 spin_unlock(lock); 853 err2 = osync_buffers_list(lock, list); 854 if (err) 855 return err; 856 else 857 return err2; 858 } 859 860 /* 861 * Invalidate any and all dirty buffers on a given inode. We are 862 * probably unmounting the fs, but that doesn't mean we have already 863 * done a sync(). Just drop the buffers from the inode list. 
864 * 865 * NOTE: we take the inode's blockdev's mapping's private_lock. Which 866 * assumes that all the buffers are against the blockdev. Not true 867 * for reiserfs. 868 */ 869 void invalidate_inode_buffers(struct inode *inode) 870 { 871 if (inode_has_buffers(inode)) { 872 struct address_space *mapping = &inode->i_data; 873 struct list_head *list = &mapping->private_list; 874 struct address_space *buffer_mapping = mapping->private_data; 875 876 spin_lock(&buffer_mapping->private_lock); 877 while (!list_empty(list)) 878 __remove_assoc_queue(BH_ENTRY(list->next)); 879 spin_unlock(&buffer_mapping->private_lock); 880 } 881 } 882 EXPORT_SYMBOL(invalidate_inode_buffers); 883 884 /* 885 * Remove any clean buffers from the inode's buffer list. This is called 886 * when we're trying to free the inode itself. Those buffers can pin it. 887 * 888 * Returns true if all buffers were removed. 889 */ 890 int remove_inode_buffers(struct inode *inode) 891 { 892 int ret = 1; 893 894 if (inode_has_buffers(inode)) { 895 struct address_space *mapping = &inode->i_data; 896 struct list_head *list = &mapping->private_list; 897 struct address_space *buffer_mapping = mapping->private_data; 898 899 spin_lock(&buffer_mapping->private_lock); 900 while (!list_empty(list)) { 901 struct buffer_head *bh = BH_ENTRY(list->next); 902 if (buffer_dirty(bh)) { 903 ret = 0; 904 break; 905 } 906 __remove_assoc_queue(bh); 907 } 908 spin_unlock(&buffer_mapping->private_lock); 909 } 910 return ret; 911 } 912 913 /* 914 * Create the appropriate buffers when given a folio for data area and 915 * the size of each buffer.. Use the bh->b_this_page linked list to 916 * follow the buffers created. Return NULL if unable to create more 917 * buffers. 918 * 919 * The retry flag is used to differentiate async IO (paging, swapping) 920 * which may not fail from ordinary buffer allocations. 921 */ 922 struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size, 923 bool retry) 924 { 925 struct buffer_head *bh, *head; 926 gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT; 927 long offset; 928 struct mem_cgroup *memcg, *old_memcg; 929 930 if (retry) 931 gfp |= __GFP_NOFAIL; 932 933 /* The folio lock pins the memcg */ 934 memcg = folio_memcg(folio); 935 old_memcg = set_active_memcg(memcg); 936 937 head = NULL; 938 offset = folio_size(folio); 939 while ((offset -= size) >= 0) { 940 bh = alloc_buffer_head(gfp); 941 if (!bh) 942 goto no_grow; 943 944 bh->b_this_page = head; 945 bh->b_blocknr = -1; 946 head = bh; 947 948 bh->b_size = size; 949 950 /* Link the buffer to its folio */ 951 folio_set_bh(bh, folio, offset); 952 } 953 out: 954 set_active_memcg(old_memcg); 955 return head; 956 /* 957 * In case anything failed, we just free everything we got. 
958 */ 959 no_grow: 960 if (head) { 961 do { 962 bh = head; 963 head = head->b_this_page; 964 free_buffer_head(bh); 965 } while (head); 966 } 967 968 goto out; 969 } 970 EXPORT_SYMBOL_GPL(folio_alloc_buffers); 971 972 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, 973 bool retry) 974 { 975 return folio_alloc_buffers(page_folio(page), size, retry); 976 } 977 EXPORT_SYMBOL_GPL(alloc_page_buffers); 978 979 static inline void link_dev_buffers(struct folio *folio, 980 struct buffer_head *head) 981 { 982 struct buffer_head *bh, *tail; 983 984 bh = head; 985 do { 986 tail = bh; 987 bh = bh->b_this_page; 988 } while (bh); 989 tail->b_this_page = head; 990 folio_attach_private(folio, head); 991 } 992 993 static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size) 994 { 995 sector_t retval = ~((sector_t)0); 996 loff_t sz = bdev_nr_bytes(bdev); 997 998 if (sz) { 999 unsigned int sizebits = blksize_bits(size); 1000 retval = (sz >> sizebits); 1001 } 1002 return retval; 1003 } 1004 1005 /* 1006 * Initialise the state of a blockdev folio's buffers. 1007 */ 1008 static sector_t folio_init_buffers(struct folio *folio, 1009 struct block_device *bdev, sector_t block, int size) 1010 { 1011 struct buffer_head *head = folio_buffers(folio); 1012 struct buffer_head *bh = head; 1013 bool uptodate = folio_test_uptodate(folio); 1014 sector_t end_block = blkdev_max_block(bdev, size); 1015 1016 do { 1017 if (!buffer_mapped(bh)) { 1018 bh->b_end_io = NULL; 1019 bh->b_private = NULL; 1020 bh->b_bdev = bdev; 1021 bh->b_blocknr = block; 1022 if (uptodate) 1023 set_buffer_uptodate(bh); 1024 if (block < end_block) 1025 set_buffer_mapped(bh); 1026 } 1027 block++; 1028 bh = bh->b_this_page; 1029 } while (bh != head); 1030 1031 /* 1032 * Caller needs to validate requested block against end of device. 1033 */ 1034 return end_block; 1035 } 1036 1037 /* 1038 * Create the page-cache page that contains the requested block. 1039 * 1040 * This is used purely for blockdev mappings. 1041 */ 1042 static int 1043 grow_dev_page(struct block_device *bdev, sector_t block, 1044 pgoff_t index, int size, int sizebits, gfp_t gfp) 1045 { 1046 struct inode *inode = bdev->bd_inode; 1047 struct folio *folio; 1048 struct buffer_head *bh; 1049 sector_t end_block; 1050 int ret = 0; 1051 gfp_t gfp_mask; 1052 1053 gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp; 1054 1055 /* 1056 * XXX: __getblk_slow() can not really deal with failure and 1057 * will endlessly loop on improvised global reclaim. Prefer 1058 * looping in the allocator rather than here, at least that 1059 * code knows what it's doing. 1060 */ 1061 gfp_mask |= __GFP_NOFAIL; 1062 1063 folio = __filemap_get_folio(inode->i_mapping, index, 1064 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp_mask); 1065 1066 bh = folio_buffers(folio); 1067 if (bh) { 1068 if (bh->b_size == size) { 1069 end_block = folio_init_buffers(folio, bdev, 1070 (sector_t)index << sizebits, size); 1071 goto done; 1072 } 1073 if (!try_to_free_buffers(folio)) 1074 goto failed; 1075 } 1076 1077 bh = folio_alloc_buffers(folio, size, true); 1078 1079 /* 1080 * Link the folio to the buffers and initialise them. Take the 1081 * lock to be atomic wrt __find_get_block(), which does not 1082 * run under the folio lock. 
1083 */ 1084 spin_lock(&inode->i_mapping->private_lock); 1085 link_dev_buffers(folio, bh); 1086 end_block = folio_init_buffers(folio, bdev, 1087 (sector_t)index << sizebits, size); 1088 spin_unlock(&inode->i_mapping->private_lock); 1089 done: 1090 ret = (block < end_block) ? 1 : -ENXIO; 1091 failed: 1092 folio_unlock(folio); 1093 folio_put(folio); 1094 return ret; 1095 } 1096 1097 /* 1098 * Create buffers for the specified block device block's page. If 1099 * that page was dirty, the buffers are set dirty also. 1100 */ 1101 static int 1102 grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp) 1103 { 1104 pgoff_t index; 1105 int sizebits; 1106 1107 sizebits = PAGE_SHIFT - __ffs(size); 1108 index = block >> sizebits; 1109 1110 /* 1111 * Check for a block which wants to lie outside our maximum possible 1112 * pagecache index. (this comparison is done using sector_t types). 1113 */ 1114 if (unlikely(index != block >> sizebits)) { 1115 printk(KERN_ERR "%s: requested out-of-range block %llu for " 1116 "device %pg\n", 1117 __func__, (unsigned long long)block, 1118 bdev); 1119 return -EIO; 1120 } 1121 1122 /* Create a page with the proper size buffers.. */ 1123 return grow_dev_page(bdev, block, index, size, sizebits, gfp); 1124 } 1125 1126 static struct buffer_head * 1127 __getblk_slow(struct block_device *bdev, sector_t block, 1128 unsigned size, gfp_t gfp) 1129 { 1130 /* Size must be multiple of hard sectorsize */ 1131 if (unlikely(size & (bdev_logical_block_size(bdev)-1) || 1132 (size < 512 || size > PAGE_SIZE))) { 1133 printk(KERN_ERR "getblk(): invalid block size %d requested\n", 1134 size); 1135 printk(KERN_ERR "logical block size: %d\n", 1136 bdev_logical_block_size(bdev)); 1137 1138 dump_stack(); 1139 return NULL; 1140 } 1141 1142 for (;;) { 1143 struct buffer_head *bh; 1144 int ret; 1145 1146 bh = __find_get_block(bdev, block, size); 1147 if (bh) 1148 return bh; 1149 1150 ret = grow_buffers(bdev, block, size, gfp); 1151 if (ret < 0) 1152 return NULL; 1153 } 1154 } 1155 1156 /* 1157 * The relationship between dirty buffers and dirty pages: 1158 * 1159 * Whenever a page has any dirty buffers, the page's dirty bit is set, and 1160 * the page is tagged dirty in the page cache. 1161 * 1162 * At all times, the dirtiness of the buffers represents the dirtiness of 1163 * subsections of the page. If the page has buffers, the page dirty bit is 1164 * merely a hint about the true dirty state. 1165 * 1166 * When a page is set dirty in its entirety, all its buffers are marked dirty 1167 * (if the page has buffers). 1168 * 1169 * When a buffer is marked dirty, its page is dirtied, but the page's other 1170 * buffers are not. 1171 * 1172 * Also. When blockdev buffers are explicitly read with bread(), they 1173 * individually become uptodate. But their backing page remains not 1174 * uptodate - even if all of its buffers are uptodate. A subsequent 1175 * block_read_full_folio() against that folio will discover all the uptodate 1176 * buffers, will set the folio uptodate and will perform no I/O. 1177 */ 1178 1179 /** 1180 * mark_buffer_dirty - mark a buffer_head as needing writeout 1181 * @bh: the buffer_head to mark dirty 1182 * 1183 * mark_buffer_dirty() will set the dirty bit against the buffer, then set 1184 * its backing page dirty, then tag the page as dirty in the page cache 1185 * and then attach the address_space's inode to its superblock's dirty 1186 * inode list. 1187 * 1188 * mark_buffer_dirty() is atomic. 
It takes bh->b_folio->mapping->private_lock, 1189 * i_pages lock and mapping->host->i_lock. 1190 */ 1191 void mark_buffer_dirty(struct buffer_head *bh) 1192 { 1193 WARN_ON_ONCE(!buffer_uptodate(bh)); 1194 1195 trace_block_dirty_buffer(bh); 1196 1197 /* 1198 * Very *carefully* optimize the it-is-already-dirty case. 1199 * 1200 * Don't let the final "is it dirty" escape to before we 1201 * perhaps modified the buffer. 1202 */ 1203 if (buffer_dirty(bh)) { 1204 smp_mb(); 1205 if (buffer_dirty(bh)) 1206 return; 1207 } 1208 1209 if (!test_set_buffer_dirty(bh)) { 1210 struct folio *folio = bh->b_folio; 1211 struct address_space *mapping = NULL; 1212 1213 folio_memcg_lock(folio); 1214 if (!folio_test_set_dirty(folio)) { 1215 mapping = folio->mapping; 1216 if (mapping) 1217 __folio_mark_dirty(folio, mapping, 0); 1218 } 1219 folio_memcg_unlock(folio); 1220 if (mapping) 1221 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 1222 } 1223 } 1224 EXPORT_SYMBOL(mark_buffer_dirty); 1225 1226 void mark_buffer_write_io_error(struct buffer_head *bh) 1227 { 1228 set_buffer_write_io_error(bh); 1229 /* FIXME: do we need to set this in both places? */ 1230 if (bh->b_folio && bh->b_folio->mapping) 1231 mapping_set_error(bh->b_folio->mapping, -EIO); 1232 if (bh->b_assoc_map) { 1233 mapping_set_error(bh->b_assoc_map, -EIO); 1234 errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO); 1235 } 1236 } 1237 EXPORT_SYMBOL(mark_buffer_write_io_error); 1238 1239 /* 1240 * Decrement a buffer_head's reference count. If all buffers against a page 1241 * have zero reference count, are clean and unlocked, and if the page is clean 1242 * and unlocked then try_to_free_buffers() may strip the buffers from the page 1243 * in preparation for freeing it (sometimes, rarely, buffers are removed from 1244 * a page but it ends up not being freed, and buffers may later be reattached). 1245 */ 1246 void __brelse(struct buffer_head * buf) 1247 { 1248 if (atomic_read(&buf->b_count)) { 1249 put_bh(buf); 1250 return; 1251 } 1252 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n"); 1253 } 1254 EXPORT_SYMBOL(__brelse); 1255 1256 /* 1257 * bforget() is like brelse(), except it discards any 1258 * potentially dirty data. 1259 */ 1260 void __bforget(struct buffer_head *bh) 1261 { 1262 clear_buffer_dirty(bh); 1263 if (bh->b_assoc_map) { 1264 struct address_space *buffer_mapping = bh->b_folio->mapping; 1265 1266 spin_lock(&buffer_mapping->private_lock); 1267 list_del_init(&bh->b_assoc_buffers); 1268 bh->b_assoc_map = NULL; 1269 spin_unlock(&buffer_mapping->private_lock); 1270 } 1271 __brelse(bh); 1272 } 1273 EXPORT_SYMBOL(__bforget); 1274 1275 static struct buffer_head *__bread_slow(struct buffer_head *bh) 1276 { 1277 lock_buffer(bh); 1278 if (buffer_uptodate(bh)) { 1279 unlock_buffer(bh); 1280 return bh; 1281 } else { 1282 get_bh(bh); 1283 bh->b_end_io = end_buffer_read_sync; 1284 submit_bh(REQ_OP_READ, bh); 1285 wait_on_buffer(bh); 1286 if (buffer_uptodate(bh)) 1287 return bh; 1288 } 1289 brelse(bh); 1290 return NULL; 1291 } 1292 1293 /* 1294 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block(). 1295 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their 1296 * refcount elevated by one when they're in an LRU. A buffer can only appear 1297 * once in a particular CPU's LRU. A single buffer can be present in multiple 1298 * CPU's LRUs at the same time. 1299 * 1300 * This is a transparent caching front-end to sb_bread(), sb_getblk() and 1301 * sb_find_get_block(). 
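 *
 * Illustrative sketch only (not part of the original source): repeated
 * lookups of the same block, e.g.
 *
 *	bh = __find_get_block(bdev, block, size);	- misses, installed in LRU
 *	brelse(bh);
 *	bh = __find_get_block(bdev, block, size);	- hits this CPU's LRU
 *	brelse(bh);
 *
 * normally pay the pagecache/private_lock cost only on the first call on
 * each CPU.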
1302 * 1303 * The LRUs themselves only need locking against invalidate_bh_lrus. We use 1304 * a local interrupt disable for that. 1305 */ 1306 1307 #define BH_LRU_SIZE 16 1308 1309 struct bh_lru { 1310 struct buffer_head *bhs[BH_LRU_SIZE]; 1311 }; 1312 1313 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }}; 1314 1315 #ifdef CONFIG_SMP 1316 #define bh_lru_lock() local_irq_disable() 1317 #define bh_lru_unlock() local_irq_enable() 1318 #else 1319 #define bh_lru_lock() preempt_disable() 1320 #define bh_lru_unlock() preempt_enable() 1321 #endif 1322 1323 static inline void check_irqs_on(void) 1324 { 1325 #ifdef irqs_disabled 1326 BUG_ON(irqs_disabled()); 1327 #endif 1328 } 1329 1330 /* 1331 * Install a buffer_head into this cpu's LRU. If not already in the LRU, it is 1332 * inserted at the front, and the buffer_head at the back if any is evicted. 1333 * Or, if already in the LRU it is moved to the front. 1334 */ 1335 static void bh_lru_install(struct buffer_head *bh) 1336 { 1337 struct buffer_head *evictee = bh; 1338 struct bh_lru *b; 1339 int i; 1340 1341 check_irqs_on(); 1342 bh_lru_lock(); 1343 1344 /* 1345 * the refcount of buffer_head in bh_lru prevents dropping the 1346 * attached page(i.e., try_to_free_buffers) so it could cause 1347 * failing page migration. 1348 * Skip putting upcoming bh into bh_lru until migration is done. 1349 */ 1350 if (lru_cache_disabled()) { 1351 bh_lru_unlock(); 1352 return; 1353 } 1354 1355 b = this_cpu_ptr(&bh_lrus); 1356 for (i = 0; i < BH_LRU_SIZE; i++) { 1357 swap(evictee, b->bhs[i]); 1358 if (evictee == bh) { 1359 bh_lru_unlock(); 1360 return; 1361 } 1362 } 1363 1364 get_bh(bh); 1365 bh_lru_unlock(); 1366 brelse(evictee); 1367 } 1368 1369 /* 1370 * Look up the bh in this cpu's LRU. If it's there, move it to the head. 1371 */ 1372 static struct buffer_head * 1373 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) 1374 { 1375 struct buffer_head *ret = NULL; 1376 unsigned int i; 1377 1378 check_irqs_on(); 1379 bh_lru_lock(); 1380 for (i = 0; i < BH_LRU_SIZE; i++) { 1381 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); 1382 1383 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && 1384 bh->b_size == size) { 1385 if (i) { 1386 while (i) { 1387 __this_cpu_write(bh_lrus.bhs[i], 1388 __this_cpu_read(bh_lrus.bhs[i - 1])); 1389 i--; 1390 } 1391 __this_cpu_write(bh_lrus.bhs[0], bh); 1392 } 1393 get_bh(bh); 1394 ret = bh; 1395 break; 1396 } 1397 } 1398 bh_lru_unlock(); 1399 return ret; 1400 } 1401 1402 /* 1403 * Perform a pagecache lookup for the matching buffer. If it's there, refresh 1404 * it in the LRU and mark it as accessed. If it is not present then return 1405 * NULL 1406 */ 1407 struct buffer_head * 1408 __find_get_block(struct block_device *bdev, sector_t block, unsigned size) 1409 { 1410 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); 1411 1412 if (bh == NULL) { 1413 /* __find_get_block_slow will mark the page accessed */ 1414 bh = __find_get_block_slow(bdev, block); 1415 if (bh) 1416 bh_lru_install(bh); 1417 } else 1418 touch_buffer(bh); 1419 1420 return bh; 1421 } 1422 EXPORT_SYMBOL(__find_get_block); 1423 1424 /* 1425 * __getblk_gfp() will locate (and, if necessary, create) the buffer_head 1426 * which corresponds to the passed block_device, block and size. The 1427 * returned buffer has its reference count incremented. 1428 * 1429 * __getblk_gfp() will lock up the machine if grow_dev_page's 1430 * try_to_free_buffers() attempt is failing. FIXME, perhaps? 
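 *
 * Illustrative sketch only (not part of the original source): most callers
 * go through the __bread()/sb_bread() wrappers instead of calling this
 * directly, roughly:
 *
 *	struct buffer_head *bh = __bread(bdev, block, size);
 *
 *	if (!bh)
 *		return -EIO;
 *	... use bh->b_data ...
 *	brelse(bh);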
1431 */ 1432 struct buffer_head * 1433 __getblk_gfp(struct block_device *bdev, sector_t block, 1434 unsigned size, gfp_t gfp) 1435 { 1436 struct buffer_head *bh = __find_get_block(bdev, block, size); 1437 1438 might_sleep(); 1439 if (bh == NULL) 1440 bh = __getblk_slow(bdev, block, size, gfp); 1441 return bh; 1442 } 1443 EXPORT_SYMBOL(__getblk_gfp); 1444 1445 /* 1446 * Do async read-ahead on a buffer.. 1447 */ 1448 void __breadahead(struct block_device *bdev, sector_t block, unsigned size) 1449 { 1450 struct buffer_head *bh = __getblk(bdev, block, size); 1451 if (likely(bh)) { 1452 bh_readahead(bh, REQ_RAHEAD); 1453 brelse(bh); 1454 } 1455 } 1456 EXPORT_SYMBOL(__breadahead); 1457 1458 /** 1459 * __bread_gfp() - reads a specified block and returns the bh 1460 * @bdev: the block_device to read from 1461 * @block: number of block 1462 * @size: size (in bytes) to read 1463 * @gfp: page allocation flag 1464 * 1465 * Reads a specified block, and returns buffer head that contains it. 1466 * The page cache can be allocated from non-movable area 1467 * not to prevent page migration if you set gfp to zero. 1468 * It returns NULL if the block was unreadable. 1469 */ 1470 struct buffer_head * 1471 __bread_gfp(struct block_device *bdev, sector_t block, 1472 unsigned size, gfp_t gfp) 1473 { 1474 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); 1475 1476 if (likely(bh) && !buffer_uptodate(bh)) 1477 bh = __bread_slow(bh); 1478 return bh; 1479 } 1480 EXPORT_SYMBOL(__bread_gfp); 1481 1482 static void __invalidate_bh_lrus(struct bh_lru *b) 1483 { 1484 int i; 1485 1486 for (i = 0; i < BH_LRU_SIZE; i++) { 1487 brelse(b->bhs[i]); 1488 b->bhs[i] = NULL; 1489 } 1490 } 1491 /* 1492 * invalidate_bh_lrus() is called rarely - but not only at unmount. 1493 * This doesn't race because it runs in each cpu either in irq 1494 * or with preempt disabled. 1495 */ 1496 static void invalidate_bh_lru(void *arg) 1497 { 1498 struct bh_lru *b = &get_cpu_var(bh_lrus); 1499 1500 __invalidate_bh_lrus(b); 1501 put_cpu_var(bh_lrus); 1502 } 1503 1504 bool has_bh_in_lru(int cpu, void *dummy) 1505 { 1506 struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu); 1507 int i; 1508 1509 for (i = 0; i < BH_LRU_SIZE; i++) { 1510 if (b->bhs[i]) 1511 return true; 1512 } 1513 1514 return false; 1515 } 1516 1517 void invalidate_bh_lrus(void) 1518 { 1519 on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1); 1520 } 1521 EXPORT_SYMBOL_GPL(invalidate_bh_lrus); 1522 1523 /* 1524 * It's called from workqueue context so we need a bh_lru_lock to close 1525 * the race with preemption/irq. 
1526 */ 1527 void invalidate_bh_lrus_cpu(void) 1528 { 1529 struct bh_lru *b; 1530 1531 bh_lru_lock(); 1532 b = this_cpu_ptr(&bh_lrus); 1533 __invalidate_bh_lrus(b); 1534 bh_lru_unlock(); 1535 } 1536 1537 void set_bh_page(struct buffer_head *bh, 1538 struct page *page, unsigned long offset) 1539 { 1540 bh->b_page = page; 1541 BUG_ON(offset >= PAGE_SIZE); 1542 if (PageHighMem(page)) 1543 /* 1544 * This catches illegal uses and preserves the offset: 1545 */ 1546 bh->b_data = (char *)(0 + offset); 1547 else 1548 bh->b_data = page_address(page) + offset; 1549 } 1550 EXPORT_SYMBOL(set_bh_page); 1551 1552 void folio_set_bh(struct buffer_head *bh, struct folio *folio, 1553 unsigned long offset) 1554 { 1555 bh->b_folio = folio; 1556 BUG_ON(offset >= folio_size(folio)); 1557 if (folio_test_highmem(folio)) 1558 /* 1559 * This catches illegal uses and preserves the offset: 1560 */ 1561 bh->b_data = (char *)(0 + offset); 1562 else 1563 bh->b_data = folio_address(folio) + offset; 1564 } 1565 EXPORT_SYMBOL(folio_set_bh); 1566 1567 /* 1568 * Called when truncating a buffer on a page completely. 1569 */ 1570 1571 /* Bits that are cleared during an invalidate */ 1572 #define BUFFER_FLAGS_DISCARD \ 1573 (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \ 1574 1 << BH_Delay | 1 << BH_Unwritten) 1575 1576 static void discard_buffer(struct buffer_head * bh) 1577 { 1578 unsigned long b_state; 1579 1580 lock_buffer(bh); 1581 clear_buffer_dirty(bh); 1582 bh->b_bdev = NULL; 1583 b_state = READ_ONCE(bh->b_state); 1584 do { 1585 } while (!try_cmpxchg(&bh->b_state, &b_state, 1586 b_state & ~BUFFER_FLAGS_DISCARD)); 1587 unlock_buffer(bh); 1588 } 1589 1590 /** 1591 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio. 1592 * @folio: The folio which is affected. 1593 * @offset: start of the range to invalidate 1594 * @length: length of the range to invalidate 1595 * 1596 * block_invalidate_folio() is called when all or part of the folio has been 1597 * invalidated by a truncate operation. 1598 * 1599 * block_invalidate_folio() does not have to release all buffers, but it must 1600 * ensure that no dirty buffer is left outside @offset and that no I/O 1601 * is underway against any of the blocks which are outside the truncation 1602 * point. Because the caller is about to free (and possibly reuse) those 1603 * blocks on-disk. 1604 */ 1605 void block_invalidate_folio(struct folio *folio, size_t offset, size_t length) 1606 { 1607 struct buffer_head *head, *bh, *next; 1608 size_t curr_off = 0; 1609 size_t stop = length + offset; 1610 1611 BUG_ON(!folio_test_locked(folio)); 1612 1613 /* 1614 * Check for overflow 1615 */ 1616 BUG_ON(stop > folio_size(folio) || stop < length); 1617 1618 head = folio_buffers(folio); 1619 if (!head) 1620 return; 1621 1622 bh = head; 1623 do { 1624 size_t next_off = curr_off + bh->b_size; 1625 next = bh->b_this_page; 1626 1627 /* 1628 * Are we still fully in range ? 1629 */ 1630 if (next_off > stop) 1631 goto out; 1632 1633 /* 1634 * is this block fully invalidated? 1635 */ 1636 if (offset <= curr_off) 1637 discard_buffer(bh); 1638 curr_off = next_off; 1639 bh = next; 1640 } while (bh != head); 1641 1642 /* 1643 * We release buffers only if the entire folio is being invalidated. 1644 * The get_block cached value has been unconditionally invalidated, 1645 * so real IO is not possible anymore. 
1646 */ 1647 if (length == folio_size(folio)) 1648 filemap_release_folio(folio, 0); 1649 out: 1650 return; 1651 } 1652 EXPORT_SYMBOL(block_invalidate_folio); 1653 1654 /* 1655 * We attach and possibly dirty the buffers atomically wrt 1656 * block_dirty_folio() via private_lock. try_to_free_buffers 1657 * is already excluded via the folio lock. 1658 */ 1659 void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize, 1660 unsigned long b_state) 1661 { 1662 struct buffer_head *bh, *head, *tail; 1663 1664 head = folio_alloc_buffers(folio, blocksize, true); 1665 bh = head; 1666 do { 1667 bh->b_state |= b_state; 1668 tail = bh; 1669 bh = bh->b_this_page; 1670 } while (bh); 1671 tail->b_this_page = head; 1672 1673 spin_lock(&folio->mapping->private_lock); 1674 if (folio_test_uptodate(folio) || folio_test_dirty(folio)) { 1675 bh = head; 1676 do { 1677 if (folio_test_dirty(folio)) 1678 set_buffer_dirty(bh); 1679 if (folio_test_uptodate(folio)) 1680 set_buffer_uptodate(bh); 1681 bh = bh->b_this_page; 1682 } while (bh != head); 1683 } 1684 folio_attach_private(folio, head); 1685 spin_unlock(&folio->mapping->private_lock); 1686 } 1687 EXPORT_SYMBOL(folio_create_empty_buffers); 1688 1689 void create_empty_buffers(struct page *page, 1690 unsigned long blocksize, unsigned long b_state) 1691 { 1692 folio_create_empty_buffers(page_folio(page), blocksize, b_state); 1693 } 1694 EXPORT_SYMBOL(create_empty_buffers); 1695 1696 /** 1697 * clean_bdev_aliases: clean a range of buffers in block device 1698 * @bdev: Block device to clean buffers in 1699 * @block: Start of a range of blocks to clean 1700 * @len: Number of blocks to clean 1701 * 1702 * We are taking a range of blocks for data and we don't want writeback of any 1703 * buffer-cache aliases starting from return from this function and until the 1704 * moment when something will explicitly mark the buffer dirty (hopefully that 1705 * will not happen until we will free that block ;-) We don't even need to mark 1706 * it not-uptodate - nobody can expect anything from a newly allocated buffer 1707 * anyway. We used to use unmap_buffer() for such invalidation, but that was 1708 * wrong. We definitely don't want to mark the alias unmapped, for example - it 1709 * would confuse anyone who might pick it with bread() afterwards... 1710 * 1711 * Also.. Note that bforget() doesn't lock the buffer. So there can be 1712 * writeout I/O going on against recently-freed buffers. We don't wait on that 1713 * I/O in bforget() - it's more efficient to wait on the I/O only if we really 1714 * need to. That happens here. 1715 */ 1716 void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len) 1717 { 1718 struct inode *bd_inode = bdev->bd_inode; 1719 struct address_space *bd_mapping = bd_inode->i_mapping; 1720 struct folio_batch fbatch; 1721 pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); 1722 pgoff_t end; 1723 int i, count; 1724 struct buffer_head *bh; 1725 struct buffer_head *head; 1726 1727 end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits); 1728 folio_batch_init(&fbatch); 1729 while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) { 1730 count = folio_batch_count(&fbatch); 1731 for (i = 0; i < count; i++) { 1732 struct folio *folio = fbatch.folios[i]; 1733 1734 if (!folio_buffers(folio)) 1735 continue; 1736 /* 1737 * We use folio lock instead of bd_mapping->private_lock 1738 * to pin buffers here since we can afford to sleep and 1739 * it scales better than a global spinlock lock. 
1740 */ 1741 folio_lock(folio); 1742 /* Recheck when the folio is locked which pins bhs */ 1743 head = folio_buffers(folio); 1744 if (!head) 1745 goto unlock_page; 1746 bh = head; 1747 do { 1748 if (!buffer_mapped(bh) || (bh->b_blocknr < block)) 1749 goto next; 1750 if (bh->b_blocknr >= block + len) 1751 break; 1752 clear_buffer_dirty(bh); 1753 wait_on_buffer(bh); 1754 clear_buffer_req(bh); 1755 next: 1756 bh = bh->b_this_page; 1757 } while (bh != head); 1758 unlock_page: 1759 folio_unlock(folio); 1760 } 1761 folio_batch_release(&fbatch); 1762 cond_resched(); 1763 /* End of range already reached? */ 1764 if (index > end || !index) 1765 break; 1766 } 1767 } 1768 EXPORT_SYMBOL(clean_bdev_aliases); 1769 1770 /* 1771 * Size is a power-of-two in the range 512..PAGE_SIZE, 1772 * and the case we care about most is PAGE_SIZE. 1773 * 1774 * So this *could* possibly be written with those 1775 * constraints in mind (relevant mostly if some 1776 * architecture has a slow bit-scan instruction) 1777 */ 1778 static inline int block_size_bits(unsigned int blocksize) 1779 { 1780 return ilog2(blocksize); 1781 } 1782 1783 static struct buffer_head *folio_create_buffers(struct folio *folio, 1784 struct inode *inode, 1785 unsigned int b_state) 1786 { 1787 BUG_ON(!folio_test_locked(folio)); 1788 1789 if (!folio_buffers(folio)) 1790 folio_create_empty_buffers(folio, 1791 1 << READ_ONCE(inode->i_blkbits), 1792 b_state); 1793 return folio_buffers(folio); 1794 } 1795 1796 /* 1797 * NOTE! All mapped/uptodate combinations are valid: 1798 * 1799 * Mapped Uptodate Meaning 1800 * 1801 * No No "unknown" - must do get_block() 1802 * No Yes "hole" - zero-filled 1803 * Yes No "allocated" - allocated on disk, not read in 1804 * Yes Yes "valid" - allocated and up-to-date in memory. 1805 * 1806 * "Dirty" is valid only with the last case (mapped+uptodate). 1807 */ 1808 1809 /* 1810 * While block_write_full_page is writing back the dirty buffers under 1811 * the page lock, whoever dirtied the buffers may decide to clean them 1812 * again at any time. We handle that by only looking at the buffer 1813 * state inside lock_buffer(). 1814 * 1815 * If block_write_full_page() is called for regular writeback 1816 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a 1817 * locked buffer. This only can happen if someone has written the buffer 1818 * directly, with submit_bh(). At the address_space level PageWriteback 1819 * prevents this contention from occurring. 1820 * 1821 * If block_write_full_page() is called with wbc->sync_mode == 1822 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this 1823 * causes the writes to be flagged as synchronous writes. 1824 */ 1825 int __block_write_full_folio(struct inode *inode, struct folio *folio, 1826 get_block_t *get_block, struct writeback_control *wbc, 1827 bh_end_io_t *handler) 1828 { 1829 int err; 1830 sector_t block; 1831 sector_t last_block; 1832 struct buffer_head *bh, *head; 1833 unsigned int blocksize, bbits; 1834 int nr_underway = 0; 1835 blk_opf_t write_flags = wbc_to_write_flags(wbc); 1836 1837 head = folio_create_buffers(folio, inode, 1838 (1 << BH_Dirty) | (1 << BH_Uptodate)); 1839 1840 /* 1841 * Be very careful. We have no exclusion from block_dirty_folio 1842 * here, and the (potentially unmapped) buffers may become dirty at 1843 * any time. If a buffer becomes dirty here after we've inspected it 1844 * then we just miss that fact, and the folio stays dirty. 
1845 * 1846 * Buffers outside i_size may be dirtied by block_dirty_folio; 1847 * handle that here by just cleaning them. 1848 */ 1849 1850 bh = head; 1851 blocksize = bh->b_size; 1852 bbits = block_size_bits(blocksize); 1853 1854 block = (sector_t)folio->index << (PAGE_SHIFT - bbits); 1855 last_block = (i_size_read(inode) - 1) >> bbits; 1856 1857 /* 1858 * Get all the dirty buffers mapped to disk addresses and 1859 * handle any aliases from the underlying blockdev's mapping. 1860 */ 1861 do { 1862 if (block > last_block) { 1863 /* 1864 * mapped buffers outside i_size will occur, because 1865 * this folio can be outside i_size when there is a 1866 * truncate in progress. 1867 */ 1868 /* 1869 * The buffer was zeroed by block_write_full_page() 1870 */ 1871 clear_buffer_dirty(bh); 1872 set_buffer_uptodate(bh); 1873 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) && 1874 buffer_dirty(bh)) { 1875 WARN_ON(bh->b_size != blocksize); 1876 err = get_block(inode, block, bh, 1); 1877 if (err) 1878 goto recover; 1879 clear_buffer_delay(bh); 1880 if (buffer_new(bh)) { 1881 /* blockdev mappings never come here */ 1882 clear_buffer_new(bh); 1883 clean_bdev_bh_alias(bh); 1884 } 1885 } 1886 bh = bh->b_this_page; 1887 block++; 1888 } while (bh != head); 1889 1890 do { 1891 if (!buffer_mapped(bh)) 1892 continue; 1893 /* 1894 * If it's a fully non-blocking write attempt and we cannot 1895 * lock the buffer then redirty the folio. Note that this can 1896 * potentially cause a busy-wait loop from writeback threads 1897 * and kswapd activity, but those code paths have their own 1898 * higher-level throttling. 1899 */ 1900 if (wbc->sync_mode != WB_SYNC_NONE) { 1901 lock_buffer(bh); 1902 } else if (!trylock_buffer(bh)) { 1903 folio_redirty_for_writepage(wbc, folio); 1904 continue; 1905 } 1906 if (test_clear_buffer_dirty(bh)) { 1907 mark_buffer_async_write_endio(bh, handler); 1908 } else { 1909 unlock_buffer(bh); 1910 } 1911 } while ((bh = bh->b_this_page) != head); 1912 1913 /* 1914 * The folio and its buffers are protected by the writeback flag, 1915 * so we can drop the bh refcounts early. 1916 */ 1917 BUG_ON(folio_test_writeback(folio)); 1918 folio_start_writeback(folio); 1919 1920 do { 1921 struct buffer_head *next = bh->b_this_page; 1922 if (buffer_async_write(bh)) { 1923 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc); 1924 nr_underway++; 1925 } 1926 bh = next; 1927 } while (bh != head); 1928 folio_unlock(folio); 1929 1930 err = 0; 1931 done: 1932 if (nr_underway == 0) { 1933 /* 1934 * The folio was marked dirty, but the buffers were 1935 * clean. Someone wrote them back by hand with 1936 * write_dirty_buffer/submit_bh. A rare case. 1937 */ 1938 folio_end_writeback(folio); 1939 1940 /* 1941 * The folio and buffer_heads can be released at any time from 1942 * here on. 1943 */ 1944 } 1945 return err; 1946 1947 recover: 1948 /* 1949 * ENOSPC, or some other error. We may already have added some 1950 * blocks to the file, so we need to write these out to avoid 1951 * exposing stale data. 1952 * The folio is currently locked and not marked for writeback 1953 */ 1954 bh = head; 1955 /* Recovery: lock and submit the mapped buffers */ 1956 do { 1957 if (buffer_mapped(bh) && buffer_dirty(bh) && 1958 !buffer_delay(bh)) { 1959 lock_buffer(bh); 1960 mark_buffer_async_write_endio(bh, handler); 1961 } else { 1962 /* 1963 * The buffer may have been set dirty during 1964 * attachment to a dirty folio. 
1965 */ 1966 clear_buffer_dirty(bh); 1967 } 1968 } while ((bh = bh->b_this_page) != head); 1969 folio_set_error(folio); 1970 BUG_ON(folio_test_writeback(folio)); 1971 mapping_set_error(folio->mapping, err); 1972 folio_start_writeback(folio); 1973 do { 1974 struct buffer_head *next = bh->b_this_page; 1975 if (buffer_async_write(bh)) { 1976 clear_buffer_dirty(bh); 1977 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc); 1978 nr_underway++; 1979 } 1980 bh = next; 1981 } while (bh != head); 1982 folio_unlock(folio); 1983 goto done; 1984 } 1985 EXPORT_SYMBOL(__block_write_full_folio); 1986 1987 /* 1988 * If a folio has any new buffers, zero them out here, and mark them uptodate 1989 * and dirty so they'll be written out (in order to prevent uninitialised 1990 * block data from leaking). And clear the new bit. 1991 */ 1992 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to) 1993 { 1994 size_t block_start, block_end; 1995 struct buffer_head *head, *bh; 1996 1997 BUG_ON(!folio_test_locked(folio)); 1998 head = folio_buffers(folio); 1999 if (!head) 2000 return; 2001 2002 bh = head; 2003 block_start = 0; 2004 do { 2005 block_end = block_start + bh->b_size; 2006 2007 if (buffer_new(bh)) { 2008 if (block_end > from && block_start < to) { 2009 if (!folio_test_uptodate(folio)) { 2010 size_t start, xend; 2011 2012 start = max(from, block_start); 2013 xend = min(to, block_end); 2014 2015 folio_zero_segment(folio, start, xend); 2016 set_buffer_uptodate(bh); 2017 } 2018 2019 clear_buffer_new(bh); 2020 mark_buffer_dirty(bh); 2021 } 2022 } 2023 2024 block_start = block_end; 2025 bh = bh->b_this_page; 2026 } while (bh != head); 2027 } 2028 EXPORT_SYMBOL(folio_zero_new_buffers); 2029 2030 static void 2031 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, 2032 const struct iomap *iomap) 2033 { 2034 loff_t offset = block << inode->i_blkbits; 2035 2036 bh->b_bdev = iomap->bdev; 2037 2038 /* 2039 * Block points to offset in file we need to map, iomap contains 2040 * the offset at which the map starts. If the map ends before the 2041 * current block, then do not map the buffer and let the caller 2042 * handle it. 2043 */ 2044 BUG_ON(offset >= iomap->offset + iomap->length); 2045 2046 switch (iomap->type) { 2047 case IOMAP_HOLE: 2048 /* 2049 * If the buffer is not up to date or beyond the current EOF, 2050 * we need to mark it as new to ensure sub-block zeroing is 2051 * executed if necessary. 2052 */ 2053 if (!buffer_uptodate(bh) || 2054 (offset >= i_size_read(inode))) 2055 set_buffer_new(bh); 2056 break; 2057 case IOMAP_DELALLOC: 2058 if (!buffer_uptodate(bh) || 2059 (offset >= i_size_read(inode))) 2060 set_buffer_new(bh); 2061 set_buffer_uptodate(bh); 2062 set_buffer_mapped(bh); 2063 set_buffer_delay(bh); 2064 break; 2065 case IOMAP_UNWRITTEN: 2066 /* 2067 * For unwritten regions, we always need to ensure that regions 2068 * in the block we are not writing to are zeroed. Mark the 2069 * buffer as new to ensure this. 
2070 */ 2071 set_buffer_new(bh); 2072 set_buffer_unwritten(bh); 2073 fallthrough; 2074 case IOMAP_MAPPED: 2075 if ((iomap->flags & IOMAP_F_NEW) || 2076 offset >= i_size_read(inode)) 2077 set_buffer_new(bh); 2078 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >> 2079 inode->i_blkbits; 2080 set_buffer_mapped(bh); 2081 break; 2082 } 2083 } 2084 2085 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len, 2086 get_block_t *get_block, const struct iomap *iomap) 2087 { 2088 unsigned from = pos & (PAGE_SIZE - 1); 2089 unsigned to = from + len; 2090 struct inode *inode = folio->mapping->host; 2091 unsigned block_start, block_end; 2092 sector_t block; 2093 int err = 0; 2094 unsigned blocksize, bbits; 2095 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; 2096 2097 BUG_ON(!folio_test_locked(folio)); 2098 BUG_ON(from > PAGE_SIZE); 2099 BUG_ON(to > PAGE_SIZE); 2100 BUG_ON(from > to); 2101 2102 head = folio_create_buffers(folio, inode, 0); 2103 blocksize = head->b_size; 2104 bbits = block_size_bits(blocksize); 2105 2106 block = (sector_t)folio->index << (PAGE_SHIFT - bbits); 2107 2108 for(bh = head, block_start = 0; bh != head || !block_start; 2109 block++, block_start=block_end, bh = bh->b_this_page) { 2110 block_end = block_start + blocksize; 2111 if (block_end <= from || block_start >= to) { 2112 if (folio_test_uptodate(folio)) { 2113 if (!buffer_uptodate(bh)) 2114 set_buffer_uptodate(bh); 2115 } 2116 continue; 2117 } 2118 if (buffer_new(bh)) 2119 clear_buffer_new(bh); 2120 if (!buffer_mapped(bh)) { 2121 WARN_ON(bh->b_size != blocksize); 2122 if (get_block) { 2123 err = get_block(inode, block, bh, 1); 2124 if (err) 2125 break; 2126 } else { 2127 iomap_to_bh(inode, block, bh, iomap); 2128 } 2129 2130 if (buffer_new(bh)) { 2131 clean_bdev_bh_alias(bh); 2132 if (folio_test_uptodate(folio)) { 2133 clear_buffer_new(bh); 2134 set_buffer_uptodate(bh); 2135 mark_buffer_dirty(bh); 2136 continue; 2137 } 2138 if (block_end > to || block_start < from) 2139 folio_zero_segments(folio, 2140 to, block_end, 2141 block_start, from); 2142 continue; 2143 } 2144 } 2145 if (folio_test_uptodate(folio)) { 2146 if (!buffer_uptodate(bh)) 2147 set_buffer_uptodate(bh); 2148 continue; 2149 } 2150 if (!buffer_uptodate(bh) && !buffer_delay(bh) && 2151 !buffer_unwritten(bh) && 2152 (block_start < from || block_end > to)) { 2153 bh_read_nowait(bh, 0); 2154 *wait_bh++=bh; 2155 } 2156 } 2157 /* 2158 * If we issued read requests - let them complete. 
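 * At most two reads can ever be outstanding here: only the buffers that
 * straddle the start and the end of the written range need to be brought
 * uptodate first, which is why the two-entry wait[] array above suffices.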
2159 */ 2160 while(wait_bh > wait) { 2161 wait_on_buffer(*--wait_bh); 2162 if (!buffer_uptodate(*wait_bh)) 2163 err = -EIO; 2164 } 2165 if (unlikely(err)) 2166 folio_zero_new_buffers(folio, from, to); 2167 return err; 2168 } 2169 2170 int __block_write_begin(struct page *page, loff_t pos, unsigned len, 2171 get_block_t *get_block) 2172 { 2173 return __block_write_begin_int(page_folio(page), pos, len, get_block, 2174 NULL); 2175 } 2176 EXPORT_SYMBOL(__block_write_begin); 2177 2178 static int __block_commit_write(struct inode *inode, struct folio *folio, 2179 size_t from, size_t to) 2180 { 2181 size_t block_start, block_end; 2182 bool partial = false; 2183 unsigned blocksize; 2184 struct buffer_head *bh, *head; 2185 2186 bh = head = folio_buffers(folio); 2187 blocksize = bh->b_size; 2188 2189 block_start = 0; 2190 do { 2191 block_end = block_start + blocksize; 2192 if (block_end <= from || block_start >= to) { 2193 if (!buffer_uptodate(bh)) 2194 partial = true; 2195 } else { 2196 set_buffer_uptodate(bh); 2197 mark_buffer_dirty(bh); 2198 } 2199 if (buffer_new(bh)) 2200 clear_buffer_new(bh); 2201 2202 block_start = block_end; 2203 bh = bh->b_this_page; 2204 } while (bh != head); 2205 2206 /* 2207 * If this is a partial write which happened to make all buffers 2208 * uptodate then we can optimize away a bogus read_folio() for 2209 * the next read(). Here we 'discover' whether the folio went 2210 * uptodate as a result of this (potentially partial) write. 2211 */ 2212 if (!partial) 2213 folio_mark_uptodate(folio); 2214 return 0; 2215 } 2216 2217 /* 2218 * block_write_begin takes care of the basic task of block allocation and 2219 * bringing partial write blocks uptodate first. 2220 * 2221 * The filesystem needs to handle block truncation upon failure. 2222 */ 2223 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, 2224 struct page **pagep, get_block_t *get_block) 2225 { 2226 pgoff_t index = pos >> PAGE_SHIFT; 2227 struct page *page; 2228 int status; 2229 2230 page = grab_cache_page_write_begin(mapping, index); 2231 if (!page) 2232 return -ENOMEM; 2233 2234 status = __block_write_begin(page, pos, len, get_block); 2235 if (unlikely(status)) { 2236 unlock_page(page); 2237 put_page(page); 2238 page = NULL; 2239 } 2240 2241 *pagep = page; 2242 return status; 2243 } 2244 EXPORT_SYMBOL(block_write_begin); 2245 2246 int block_write_end(struct file *file, struct address_space *mapping, 2247 loff_t pos, unsigned len, unsigned copied, 2248 struct page *page, void *fsdata) 2249 { 2250 struct folio *folio = page_folio(page); 2251 struct inode *inode = mapping->host; 2252 size_t start = pos - folio_pos(folio); 2253 2254 if (unlikely(copied < len)) { 2255 /* 2256 * The buffers that were written will now be uptodate, so 2257 * we don't have to worry about a read_folio reading them 2258 * and overwriting a partial write. However if we have 2259 * encountered a short write and only partially written 2260 * into a buffer, it will not be marked uptodate, so a 2261 * read_folio might come in and destroy our partial write. 2262 * 2263 * Do the simplest thing, and just treat any short write to a 2264 * non uptodate folio as a zero-length write, and force the 2265 * caller to redo the whole thing. 
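 * Callers such as generic_perform_write() handle the zero return by
 * faulting the source pages in and retrying the copy, so no user data
 * is lost.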
2266 */ 2267 if (!folio_test_uptodate(folio)) 2268 copied = 0; 2269 2270 folio_zero_new_buffers(folio, start+copied, start+len); 2271 } 2272 flush_dcache_folio(folio); 2273 2274 /* This could be a short (even 0-length) commit */ 2275 __block_commit_write(inode, folio, start, start + copied); 2276 2277 return copied; 2278 } 2279 EXPORT_SYMBOL(block_write_end); 2280 2281 int generic_write_end(struct file *file, struct address_space *mapping, 2282 loff_t pos, unsigned len, unsigned copied, 2283 struct page *page, void *fsdata) 2284 { 2285 struct inode *inode = mapping->host; 2286 loff_t old_size = inode->i_size; 2287 bool i_size_changed = false; 2288 2289 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); 2290 2291 /* 2292 * No need to use i_size_read() here, the i_size cannot change under us 2293 * because we hold i_rwsem. 2294 * 2295 * But it's important to update i_size while still holding page lock: 2296 * page writeout could otherwise come in and zero beyond i_size. 2297 */ 2298 if (pos + copied > inode->i_size) { 2299 i_size_write(inode, pos + copied); 2300 i_size_changed = true; 2301 } 2302 2303 unlock_page(page); 2304 put_page(page); 2305 2306 if (old_size < pos) 2307 pagecache_isize_extended(inode, old_size, pos); 2308 /* 2309 * Don't mark the inode dirty under page lock. First, it unnecessarily 2310 * makes the holding time of page lock longer. Second, it forces lock 2311 * ordering of page lock and transaction start for journaling 2312 * filesystems. 2313 */ 2314 if (i_size_changed) 2315 mark_inode_dirty(inode); 2316 return copied; 2317 } 2318 EXPORT_SYMBOL(generic_write_end); 2319 2320 /* 2321 * block_is_partially_uptodate checks whether buffers within a folio are 2322 * uptodate or not. 2323 * 2324 * Returns true if all buffers which correspond to the specified part 2325 * of the folio are uptodate. 2326 */ 2327 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count) 2328 { 2329 unsigned block_start, block_end, blocksize; 2330 unsigned to; 2331 struct buffer_head *bh, *head; 2332 bool ret = true; 2333 2334 head = folio_buffers(folio); 2335 if (!head) 2336 return false; 2337 blocksize = head->b_size; 2338 to = min_t(unsigned, folio_size(folio) - from, count); 2339 to = from + to; 2340 if (from < blocksize && to > folio_size(folio) - blocksize) 2341 return false; 2342 2343 bh = head; 2344 block_start = 0; 2345 do { 2346 block_end = block_start + blocksize; 2347 if (block_end > from && block_start < to) { 2348 if (!buffer_uptodate(bh)) { 2349 ret = false; 2350 break; 2351 } 2352 if (block_end >= to) 2353 break; 2354 } 2355 block_start = block_end; 2356 bh = bh->b_this_page; 2357 } while (bh != head); 2358 2359 return ret; 2360 } 2361 EXPORT_SYMBOL(block_is_partially_uptodate); 2362 2363 /* 2364 * Generic "read_folio" function for block devices that have the normal 2365 * get_block functionality. This is most of the block device filesystems. 2366 * Reads the folio asynchronously --- the unlock_buffer() and 2367 * set/clear_buffer_uptodate() functions propagate buffer state into the 2368 * folio once IO has completed. 2369 */ 2370 int block_read_full_folio(struct folio *folio, get_block_t *get_block) 2371 { 2372 struct inode *inode = folio->mapping->host; 2373 sector_t iblock, lblock; 2374 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; 2375 unsigned int blocksize, bbits; 2376 int nr, i; 2377 int fully_mapped = 1; 2378 bool page_error = false; 2379 loff_t limit = i_size_read(inode); 2380 2381 /* This is needed for ext4. 
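 * With fs-verity enabled, ext4 keeps the Merkle tree in the page cache
 * beyond i_size, so reads past EOF must still be mapped here rather than
 * zero-filled; hence the relaxed limit below.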
*/ 2382 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode)) 2383 limit = inode->i_sb->s_maxbytes; 2384 2385 VM_BUG_ON_FOLIO(folio_test_large(folio), folio); 2386 2387 head = folio_create_buffers(folio, inode, 0); 2388 blocksize = head->b_size; 2389 bbits = block_size_bits(blocksize); 2390 2391 iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits); 2392 lblock = (limit+blocksize-1) >> bbits; 2393 bh = head; 2394 nr = 0; 2395 i = 0; 2396 2397 do { 2398 if (buffer_uptodate(bh)) 2399 continue; 2400 2401 if (!buffer_mapped(bh)) { 2402 int err = 0; 2403 2404 fully_mapped = 0; 2405 if (iblock < lblock) { 2406 WARN_ON(bh->b_size != blocksize); 2407 err = get_block(inode, iblock, bh, 0); 2408 if (err) { 2409 folio_set_error(folio); 2410 page_error = true; 2411 } 2412 } 2413 if (!buffer_mapped(bh)) { 2414 folio_zero_range(folio, i * blocksize, 2415 blocksize); 2416 if (!err) 2417 set_buffer_uptodate(bh); 2418 continue; 2419 } 2420 /* 2421 * get_block() might have updated the buffer 2422 * synchronously 2423 */ 2424 if (buffer_uptodate(bh)) 2425 continue; 2426 } 2427 arr[nr++] = bh; 2428 } while (i++, iblock++, (bh = bh->b_this_page) != head); 2429 2430 if (fully_mapped) 2431 folio_set_mappedtodisk(folio); 2432 2433 if (!nr) { 2434 /* 2435 * All buffers are uptodate - we can set the folio uptodate 2436 * as well. But not if get_block() returned an error. 2437 */ 2438 if (!page_error) 2439 folio_mark_uptodate(folio); 2440 folio_unlock(folio); 2441 return 0; 2442 } 2443 2444 /* Stage two: lock the buffers */ 2445 for (i = 0; i < nr; i++) { 2446 bh = arr[i]; 2447 lock_buffer(bh); 2448 mark_buffer_async_read(bh); 2449 } 2450 2451 /* 2452 * Stage 3: start the IO. Check for uptodateness 2453 * inside the buffer lock in case another process reading 2454 * the underlying blockdev brought it uptodate (the sct fix). 2455 */ 2456 for (i = 0; i < nr; i++) { 2457 bh = arr[i]; 2458 if (buffer_uptodate(bh)) 2459 end_buffer_async_read(bh, 1); 2460 else 2461 submit_bh(REQ_OP_READ, bh); 2462 } 2463 return 0; 2464 } 2465 EXPORT_SYMBOL(block_read_full_folio); 2466 2467 /* utility function for filesystems that need to do work on expanding 2468 * truncates. Uses filesystem pagecache writes to allow the filesystem to 2469 * deal with the hole. 
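 *
 * A rough usage sketch, as seen from a filesystem's ->setattr when a
 * size-extending truncate comes in (attr/inode/err are the usual ->setattr
 * locals; illustrative only, not taken from any particular filesystem):
 *
 *	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}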
2470 */ 2471 int generic_cont_expand_simple(struct inode *inode, loff_t size) 2472 { 2473 struct address_space *mapping = inode->i_mapping; 2474 const struct address_space_operations *aops = mapping->a_ops; 2475 struct page *page; 2476 void *fsdata = NULL; 2477 int err; 2478 2479 err = inode_newsize_ok(inode, size); 2480 if (err) 2481 goto out; 2482 2483 err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata); 2484 if (err) 2485 goto out; 2486 2487 err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata); 2488 BUG_ON(err > 0); 2489 2490 out: 2491 return err; 2492 } 2493 EXPORT_SYMBOL(generic_cont_expand_simple); 2494 2495 static int cont_expand_zero(struct file *file, struct address_space *mapping, 2496 loff_t pos, loff_t *bytes) 2497 { 2498 struct inode *inode = mapping->host; 2499 const struct address_space_operations *aops = mapping->a_ops; 2500 unsigned int blocksize = i_blocksize(inode); 2501 struct page *page; 2502 void *fsdata = NULL; 2503 pgoff_t index, curidx; 2504 loff_t curpos; 2505 unsigned zerofrom, offset, len; 2506 int err = 0; 2507 2508 index = pos >> PAGE_SHIFT; 2509 offset = pos & ~PAGE_MASK; 2510 2511 while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) { 2512 zerofrom = curpos & ~PAGE_MASK; 2513 if (zerofrom & (blocksize-1)) { 2514 *bytes |= (blocksize-1); 2515 (*bytes)++; 2516 } 2517 len = PAGE_SIZE - zerofrom; 2518 2519 err = aops->write_begin(file, mapping, curpos, len, 2520 &page, &fsdata); 2521 if (err) 2522 goto out; 2523 zero_user(page, zerofrom, len); 2524 err = aops->write_end(file, mapping, curpos, len, len, 2525 page, fsdata); 2526 if (err < 0) 2527 goto out; 2528 BUG_ON(err != len); 2529 err = 0; 2530 2531 balance_dirty_pages_ratelimited(mapping); 2532 2533 if (fatal_signal_pending(current)) { 2534 err = -EINTR; 2535 goto out; 2536 } 2537 } 2538 2539 /* page covers the boundary, find the boundary offset */ 2540 if (index == curidx) { 2541 zerofrom = curpos & ~PAGE_MASK; 2542 /* if we will expand the thing last block will be filled */ 2543 if (offset <= zerofrom) { 2544 goto out; 2545 } 2546 if (zerofrom & (blocksize-1)) { 2547 *bytes |= (blocksize-1); 2548 (*bytes)++; 2549 } 2550 len = offset - zerofrom; 2551 2552 err = aops->write_begin(file, mapping, curpos, len, 2553 &page, &fsdata); 2554 if (err) 2555 goto out; 2556 zero_user(page, zerofrom, len); 2557 err = aops->write_end(file, mapping, curpos, len, len, 2558 page, fsdata); 2559 if (err < 0) 2560 goto out; 2561 BUG_ON(err != len); 2562 err = 0; 2563 } 2564 out: 2565 return err; 2566 } 2567 2568 /* 2569 * For moronic filesystems that do not allow holes in file. 2570 * We may have to extend the file. 
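 *
 * A hedged sketch of the usual ->write_begin wrapper around this helper;
 * "myfs_get_block" and the per-inode "mmu_private" counter are hypothetical
 * stand-ins for the filesystem's own get_block_t and "bytes initialised so
 * far" bookkeeping:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos, unsigned len,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, pagep, fsdata,
 *					myfs_get_block,
 *					&MYFS_I(mapping->host)->mmu_private);
 *	}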
2571 */ 2572 int cont_write_begin(struct file *file, struct address_space *mapping, 2573 loff_t pos, unsigned len, 2574 struct page **pagep, void **fsdata, 2575 get_block_t *get_block, loff_t *bytes) 2576 { 2577 struct inode *inode = mapping->host; 2578 unsigned int blocksize = i_blocksize(inode); 2579 unsigned int zerofrom; 2580 int err; 2581 2582 err = cont_expand_zero(file, mapping, pos, bytes); 2583 if (err) 2584 return err; 2585 2586 zerofrom = *bytes & ~PAGE_MASK; 2587 if (pos+len > *bytes && zerofrom & (blocksize-1)) { 2588 *bytes |= (blocksize-1); 2589 (*bytes)++; 2590 } 2591 2592 return block_write_begin(mapping, pos, len, pagep, get_block); 2593 } 2594 EXPORT_SYMBOL(cont_write_begin); 2595 2596 int block_commit_write(struct page *page, unsigned from, unsigned to) 2597 { 2598 struct folio *folio = page_folio(page); 2599 struct inode *inode = folio->mapping->host; 2600 __block_commit_write(inode, folio, from, to); 2601 return 0; 2602 } 2603 EXPORT_SYMBOL(block_commit_write); 2604 2605 /* 2606 * block_page_mkwrite() is not allowed to change the file size as it gets 2607 * called from a page fault handler when a page is first dirtied. Hence we must 2608 * be careful to check for EOF conditions here. We set the page up correctly 2609 * for a written page which means we get ENOSPC checking when writing into 2610 * holes and correct delalloc and unwritten extent mapping on filesystems that 2611 * support these features. 2612 * 2613 * We are not allowed to take the i_mutex here so we have to play games to 2614 * protect against truncate races as the page could now be beyond EOF. Because 2615 * truncate writes the inode size before removing pages, once we have the 2616 * page lock we can determine safely if the page is beyond EOF. If it is not 2617 * beyond EOF, then the page is guaranteed safe against truncation until we 2618 * unlock the page. 2619 * 2620 * Direct callers of this function should protect against filesystem freezing 2621 * using sb_start_pagefault() - sb_end_pagefault() functions. 
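 *
 * A minimal sketch of a ->page_mkwrite handler built on this helper
 * ("myfs_get_block" is a hypothetical get_block_t; the freeze protection
 * mentioned above is what sb_start_pagefault()/sb_end_pagefault() provide):
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct super_block *sb = file_inode(vmf->vma->vm_file)->i_sb;
 *		int err;
 *
 *		sb_start_pagefault(sb);
 *		err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *		sb_end_pagefault(sb);
 *		return block_page_mkwrite_return(err);
 *	}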
2622 */ 2623 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 2624 get_block_t get_block) 2625 { 2626 struct folio *folio = page_folio(vmf->page); 2627 struct inode *inode = file_inode(vma->vm_file); 2628 unsigned long end; 2629 loff_t size; 2630 int ret; 2631 2632 folio_lock(folio); 2633 size = i_size_read(inode); 2634 if ((folio->mapping != inode->i_mapping) || 2635 (folio_pos(folio) >= size)) { 2636 /* We overload EFAULT to mean page got truncated */ 2637 ret = -EFAULT; 2638 goto out_unlock; 2639 } 2640 2641 end = folio_size(folio); 2642 /* folio is wholly or partially inside EOF */ 2643 if (folio_pos(folio) + end > size) 2644 end = size - folio_pos(folio); 2645 2646 ret = __block_write_begin_int(folio, 0, end, get_block, NULL); 2647 if (!ret) 2648 ret = __block_commit_write(inode, folio, 0, end); 2649 2650 if (unlikely(ret < 0)) 2651 goto out_unlock; 2652 folio_mark_dirty(folio); 2653 folio_wait_stable(folio); 2654 return 0; 2655 out_unlock: 2656 folio_unlock(folio); 2657 return ret; 2658 } 2659 EXPORT_SYMBOL(block_page_mkwrite); 2660 2661 int block_truncate_page(struct address_space *mapping, 2662 loff_t from, get_block_t *get_block) 2663 { 2664 pgoff_t index = from >> PAGE_SHIFT; 2665 unsigned blocksize; 2666 sector_t iblock; 2667 size_t offset, length, pos; 2668 struct inode *inode = mapping->host; 2669 struct folio *folio; 2670 struct buffer_head *bh; 2671 int err = 0; 2672 2673 blocksize = i_blocksize(inode); 2674 length = from & (blocksize - 1); 2675 2676 /* Block boundary? Nothing to do */ 2677 if (!length) 2678 return 0; 2679 2680 length = blocksize - length; 2681 iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); 2682 2683 folio = filemap_grab_folio(mapping, index); 2684 if (IS_ERR(folio)) 2685 return PTR_ERR(folio); 2686 2687 bh = folio_buffers(folio); 2688 if (!bh) { 2689 folio_create_empty_buffers(folio, blocksize, 0); 2690 bh = folio_buffers(folio); 2691 } 2692 2693 /* Find the buffer that contains "offset" */ 2694 offset = offset_in_folio(folio, from); 2695 pos = blocksize; 2696 while (offset >= pos) { 2697 bh = bh->b_this_page; 2698 iblock++; 2699 pos += blocksize; 2700 } 2701 2702 if (!buffer_mapped(bh)) { 2703 WARN_ON(bh->b_size != blocksize); 2704 err = get_block(inode, iblock, bh, 0); 2705 if (err) 2706 goto unlock; 2707 /* unmapped? It's a hole - nothing to do */ 2708 if (!buffer_mapped(bh)) 2709 goto unlock; 2710 } 2711 2712 /* Ok, it's mapped. Make sure it's up-to-date */ 2713 if (folio_test_uptodate(folio)) 2714 set_buffer_uptodate(bh); 2715 2716 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { 2717 err = bh_read(bh, 0); 2718 /* Uhhuh. Read error. Complain and punt. */ 2719 if (err < 0) 2720 goto unlock; 2721 } 2722 2723 folio_zero_range(folio, offset, length); 2724 mark_buffer_dirty(bh); 2725 2726 unlock: 2727 folio_unlock(folio); 2728 folio_put(folio); 2729 2730 return err; 2731 } 2732 EXPORT_SYMBOL(block_truncate_page); 2733 2734 /* 2735 * The generic ->writepage function for buffer-backed address_spaces 2736 */ 2737 int block_write_full_page(struct page *page, get_block_t *get_block, 2738 struct writeback_control *wbc) 2739 { 2740 struct folio *folio = page_folio(page); 2741 struct inode * const inode = folio->mapping->host; 2742 loff_t i_size = i_size_read(inode); 2743 2744 /* Is the folio fully inside i_size? 
*/ 2745 if (folio_pos(folio) + folio_size(folio) <= i_size) 2746 return __block_write_full_folio(inode, folio, get_block, wbc, 2747 end_buffer_async_write); 2748 2749 /* Is the folio fully outside i_size? (truncate in progress) */ 2750 if (folio_pos(folio) >= i_size) { 2751 folio_unlock(folio); 2752 return 0; /* don't care */ 2753 } 2754 2755 /* 2756 * The folio straddles i_size. It must be zeroed out on each and every 2757 * writepage invocation because it may be mmapped. "A file is mapped 2758 * in multiples of the page size. For a file that is not a multiple of 2759 * the page size, the remaining memory is zeroed when mapped, and 2760 * writes to that region are not written out to the file." 2761 */ 2762 folio_zero_segment(folio, offset_in_folio(folio, i_size), 2763 folio_size(folio)); 2764 return __block_write_full_folio(inode, folio, get_block, wbc, 2765 end_buffer_async_write); 2766 } 2767 EXPORT_SYMBOL(block_write_full_page); 2768 2769 sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 2770 get_block_t *get_block) 2771 { 2772 struct inode *inode = mapping->host; 2773 struct buffer_head tmp = { 2774 .b_size = i_blocksize(inode), 2775 }; 2776 2777 get_block(inode, block, &tmp, 0); 2778 return tmp.b_blocknr; 2779 } 2780 EXPORT_SYMBOL(generic_block_bmap); 2781 2782 static void end_bio_bh_io_sync(struct bio *bio) 2783 { 2784 struct buffer_head *bh = bio->bi_private; 2785 2786 if (unlikely(bio_flagged(bio, BIO_QUIET))) 2787 set_bit(BH_Quiet, &bh->b_state); 2788 2789 bh->b_end_io(bh, !bio->bi_status); 2790 bio_put(bio); 2791 } 2792 2793 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh, 2794 struct writeback_control *wbc) 2795 { 2796 const enum req_op op = opf & REQ_OP_MASK; 2797 struct bio *bio; 2798 2799 BUG_ON(!buffer_locked(bh)); 2800 BUG_ON(!buffer_mapped(bh)); 2801 BUG_ON(!bh->b_end_io); 2802 BUG_ON(buffer_delay(bh)); 2803 BUG_ON(buffer_unwritten(bh)); 2804 2805 /* 2806 * Only clear out a write error when rewriting 2807 */ 2808 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) 2809 clear_buffer_write_io_error(bh); 2810 2811 if (buffer_meta(bh)) 2812 opf |= REQ_META; 2813 if (buffer_prio(bh)) 2814 opf |= REQ_PRIO; 2815 2816 bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO); 2817 2818 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); 2819 2820 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 2821 2822 __bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); 2823 2824 bio->bi_end_io = end_bio_bh_io_sync; 2825 bio->bi_private = bh; 2826 2827 /* Take care of bh's that straddle the end of the device */ 2828 guard_bio_eod(bio); 2829 2830 if (wbc) { 2831 wbc_init_bio(wbc, bio); 2832 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); 2833 } 2834 2835 submit_bio(bio); 2836 } 2837 2838 void submit_bh(blk_opf_t opf, struct buffer_head *bh) 2839 { 2840 submit_bh_wbc(opf, bh, NULL); 2841 } 2842 EXPORT_SYMBOL(submit_bh); 2843 2844 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) 2845 { 2846 lock_buffer(bh); 2847 if (!test_clear_buffer_dirty(bh)) { 2848 unlock_buffer(bh); 2849 return; 2850 } 2851 bh->b_end_io = end_buffer_write_sync; 2852 get_bh(bh); 2853 submit_bh(REQ_OP_WRITE | op_flags, bh); 2854 } 2855 EXPORT_SYMBOL(write_dirty_buffer); 2856 2857 /* 2858 * For a data-integrity writeout, we need to wait upon any in-progress I/O 2859 * and then start new I/O and then wait upon it. The caller must have a ref on 2860 * the buffer_head. 
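 *
 * A hedged usage sketch (assumes the caller already holds a reference on
 * "bh", e.g. one obtained from sb_bread()):
 *
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *
 * where a negative return (-EIO) means the buffer turned out to be unmapped
 * or the write itself failed.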
2861 */ 2862 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags) 2863 { 2864 WARN_ON(atomic_read(&bh->b_count) < 1); 2865 lock_buffer(bh); 2866 if (test_clear_buffer_dirty(bh)) { 2867 /* 2868 * The bh should be mapped, but it might not be if the 2869 * device was hot-removed. Not much we can do but fail the I/O. 2870 */ 2871 if (!buffer_mapped(bh)) { 2872 unlock_buffer(bh); 2873 return -EIO; 2874 } 2875 2876 get_bh(bh); 2877 bh->b_end_io = end_buffer_write_sync; 2878 submit_bh(REQ_OP_WRITE | op_flags, bh); 2879 wait_on_buffer(bh); 2880 if (!buffer_uptodate(bh)) 2881 return -EIO; 2882 } else { 2883 unlock_buffer(bh); 2884 } 2885 return 0; 2886 } 2887 EXPORT_SYMBOL(__sync_dirty_buffer); 2888 2889 int sync_dirty_buffer(struct buffer_head *bh) 2890 { 2891 return __sync_dirty_buffer(bh, REQ_SYNC); 2892 } 2893 EXPORT_SYMBOL(sync_dirty_buffer); 2894 2895 /* 2896 * try_to_free_buffers() checks if all the buffers on this particular folio 2897 * are unused, and releases them if so. 2898 * 2899 * Exclusion against try_to_free_buffers may be obtained by either 2900 * locking the folio or by holding its mapping's private_lock. 2901 * 2902 * If the folio is dirty but all the buffers are clean then we need to 2903 * be sure to mark the folio clean as well. This is because the folio 2904 * may be against a block device, and a later reattachment of buffers 2905 * to a dirty folio will set *all* buffers dirty. Which would corrupt 2906 * filesystem data on the same device. 2907 * 2908 * The same applies to regular filesystem folios: if all the buffers are 2909 * clean then we set the folio clean and proceed. To do that, we require 2910 * total exclusion from block_dirty_folio(). That is obtained with 2911 * private_lock. 2912 * 2913 * try_to_free_buffers() is non-blocking. 2914 */ 2915 static inline int buffer_busy(struct buffer_head *bh) 2916 { 2917 return atomic_read(&bh->b_count) | 2918 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock))); 2919 } 2920 2921 static bool 2922 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free) 2923 { 2924 struct buffer_head *head = folio_buffers(folio); 2925 struct buffer_head *bh; 2926 2927 bh = head; 2928 do { 2929 if (buffer_busy(bh)) 2930 goto failed; 2931 bh = bh->b_this_page; 2932 } while (bh != head); 2933 2934 do { 2935 struct buffer_head *next = bh->b_this_page; 2936 2937 if (bh->b_assoc_map) 2938 __remove_assoc_queue(bh); 2939 bh = next; 2940 } while (bh != head); 2941 *buffers_to_free = head; 2942 folio_detach_private(folio); 2943 return true; 2944 failed: 2945 return false; 2946 } 2947 2948 bool try_to_free_buffers(struct folio *folio) 2949 { 2950 struct address_space * const mapping = folio->mapping; 2951 struct buffer_head *buffers_to_free = NULL; 2952 bool ret = 0; 2953 2954 BUG_ON(!folio_test_locked(folio)); 2955 if (folio_test_writeback(folio)) 2956 return false; 2957 2958 if (mapping == NULL) { /* can this still happen? */ 2959 ret = drop_buffers(folio, &buffers_to_free); 2960 goto out; 2961 } 2962 2963 spin_lock(&mapping->private_lock); 2964 ret = drop_buffers(folio, &buffers_to_free); 2965 2966 /* 2967 * If the filesystem writes its buffers by hand (eg ext3) 2968 * then we can have clean buffers against a dirty folio. We 2969 * clean the folio here; otherwise the VM will never notice 2970 * that the filesystem did any IO at all. 2971 * 2972 * Also, during truncate, discard_buffer will have marked all 2973 * the folio's buffers clean. We discover that here and clean 2974 * the folio also. 
2975 * 2976 * private_lock must be held over this entire operation in order 2977 * to synchronise against block_dirty_folio and prevent the 2978 * dirty bit from being lost. 2979 */ 2980 if (ret) 2981 folio_cancel_dirty(folio); 2982 spin_unlock(&mapping->private_lock); 2983 out: 2984 if (buffers_to_free) { 2985 struct buffer_head *bh = buffers_to_free; 2986 2987 do { 2988 struct buffer_head *next = bh->b_this_page; 2989 free_buffer_head(bh); 2990 bh = next; 2991 } while (bh != buffers_to_free); 2992 } 2993 return ret; 2994 } 2995 EXPORT_SYMBOL(try_to_free_buffers); 2996 2997 /* 2998 * Buffer-head allocation 2999 */ 3000 static struct kmem_cache *bh_cachep __read_mostly; 3001 3002 /* 3003 * Once the number of bh's in the machine exceeds this level, we start 3004 * stripping them in writeback. 3005 */ 3006 static unsigned long max_buffer_heads; 3007 3008 int buffer_heads_over_limit; 3009 3010 struct bh_accounting { 3011 int nr; /* Number of live bh's */ 3012 int ratelimit; /* Limit cacheline bouncing */ 3013 }; 3014 3015 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0}; 3016 3017 static void recalc_bh_state(void) 3018 { 3019 int i; 3020 int tot = 0; 3021 3022 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096) 3023 return; 3024 __this_cpu_write(bh_accounting.ratelimit, 0); 3025 for_each_online_cpu(i) 3026 tot += per_cpu(bh_accounting, i).nr; 3027 buffer_heads_over_limit = (tot > max_buffer_heads); 3028 } 3029 3030 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 3031 { 3032 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); 3033 if (ret) { 3034 INIT_LIST_HEAD(&ret->b_assoc_buffers); 3035 spin_lock_init(&ret->b_uptodate_lock); 3036 preempt_disable(); 3037 __this_cpu_inc(bh_accounting.nr); 3038 recalc_bh_state(); 3039 preempt_enable(); 3040 } 3041 return ret; 3042 } 3043 EXPORT_SYMBOL(alloc_buffer_head); 3044 3045 void free_buffer_head(struct buffer_head *bh) 3046 { 3047 BUG_ON(!list_empty(&bh->b_assoc_buffers)); 3048 kmem_cache_free(bh_cachep, bh); 3049 preempt_disable(); 3050 __this_cpu_dec(bh_accounting.nr); 3051 recalc_bh_state(); 3052 preempt_enable(); 3053 } 3054 EXPORT_SYMBOL(free_buffer_head); 3055 3056 static int buffer_exit_cpu_dead(unsigned int cpu) 3057 { 3058 int i; 3059 struct bh_lru *b = &per_cpu(bh_lrus, cpu); 3060 3061 for (i = 0; i < BH_LRU_SIZE; i++) { 3062 brelse(b->bhs[i]); 3063 b->bhs[i] = NULL; 3064 } 3065 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr); 3066 per_cpu(bh_accounting, cpu).nr = 0; 3067 return 0; 3068 } 3069 3070 /** 3071 * bh_uptodate_or_lock - Test whether the buffer is uptodate 3072 * @bh: struct buffer_head 3073 * 3074 * Return true if the buffer is up-to-date and false, 3075 * with the buffer locked, if not. 3076 */ 3077 int bh_uptodate_or_lock(struct buffer_head *bh) 3078 { 3079 if (!buffer_uptodate(bh)) { 3080 lock_buffer(bh); 3081 if (!buffer_uptodate(bh)) 3082 return 0; 3083 unlock_buffer(bh); 3084 } 3085 return 1; 3086 } 3087 EXPORT_SYMBOL(bh_uptodate_or_lock); 3088 3089 /** 3090 * __bh_read - Submit read for a locked buffer 3091 * @bh: struct buffer_head 3092 * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ 3093 * @wait: wait until reading finish 3094 * 3095 * Returns zero on success or don't wait, and -EIO on error. 
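 * (That is: when @wait is false the read is merely submitted and zero is
 * returned; the caller must check the buffer state itself once the I/O
 * completes.)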
3096 */ 3097 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait) 3098 { 3099 int ret = 0; 3100 3101 BUG_ON(!buffer_locked(bh)); 3102 3103 get_bh(bh); 3104 bh->b_end_io = end_buffer_read_sync; 3105 submit_bh(REQ_OP_READ | op_flags, bh); 3106 if (wait) { 3107 wait_on_buffer(bh); 3108 if (!buffer_uptodate(bh)) 3109 ret = -EIO; 3110 } 3111 return ret; 3112 } 3113 EXPORT_SYMBOL(__bh_read); 3114 3115 /** 3116 * __bh_read_batch - Submit read for a batch of unlocked buffers 3117 * @nr: number of entries in the buffer batch 3118 * @bhs: a batch of struct buffer_head 3119 * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ 3120 * @force_lock: force to get a lock on the buffer if set, otherwise skips any 3121 * buffer that cannot be locked. 3122 * 3123 * Returns nothing; buffers already uptodate or that cannot be locked are skipped. 3124 */ 3125 void __bh_read_batch(int nr, struct buffer_head *bhs[], 3126 blk_opf_t op_flags, bool force_lock) 3127 { 3128 int i; 3129 3130 for (i = 0; i < nr; i++) { 3131 struct buffer_head *bh = bhs[i]; 3132 3133 if (buffer_uptodate(bh)) 3134 continue; 3135 3136 if (force_lock) 3137 lock_buffer(bh); 3138 else 3139 if (!trylock_buffer(bh)) 3140 continue; 3141 3142 if (buffer_uptodate(bh)) { 3143 unlock_buffer(bh); 3144 continue; 3145 } 3146 3147 bh->b_end_io = end_buffer_read_sync; 3148 get_bh(bh); 3149 submit_bh(REQ_OP_READ | op_flags, bh); 3150 } 3151 } 3152 EXPORT_SYMBOL(__bh_read_batch); 3153 3154 void __init buffer_init(void) 3155 { 3156 unsigned long nrpages; 3157 int ret; 3158 3159 bh_cachep = kmem_cache_create("buffer_head", 3160 sizeof(struct buffer_head), 0, 3161 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| 3162 SLAB_MEM_SPREAD), 3163 NULL); 3164 3165 /* 3166 * Limit the bh occupancy to 10% of ZONE_NORMAL 3167 */ 3168 nrpages = (nr_free_buffer_pages() * 10) / 100; 3169 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); 3170 ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead", 3171 NULL, buffer_exit_cpu_dead); 3172 WARN_ON(ret < 0); 3173 } 3174
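/*
 * For illustration, the usual composition of bh_uptodate_or_lock() and
 * __bh_read() above - roughly what bh_read() in <linux/buffer_head.h>
 * amounts to (sketch only, shown with the same op_flags convention):
 *
 *	if (bh_uptodate_or_lock(bh))
 *		return 1;
 *	return __bh_read(bh, op_flags, true);
 */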