1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds * linux/fs/buffer.c
41da177e4SLinus Torvalds *
51da177e4SLinus Torvalds * Copyright (C) 1991, 1992, 2002 Linus Torvalds
61da177e4SLinus Torvalds */
71da177e4SLinus Torvalds
81da177e4SLinus Torvalds /*
91da177e4SLinus Torvalds * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
101da177e4SLinus Torvalds *
111da177e4SLinus Torvalds * Removed a lot of unnecessary code and simplified things now that
121da177e4SLinus Torvalds * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
131da177e4SLinus Torvalds *
141da177e4SLinus Torvalds * Speed up hash, lru, and free list operations. Use gfp() for allocating
151da177e4SLinus Torvalds * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
161da177e4SLinus Torvalds *
171da177e4SLinus Torvalds * Added 32k buffer block sizes - these are required older ARM systems. - RMK
181da177e4SLinus Torvalds *
191da177e4SLinus Torvalds * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
201da177e4SLinus Torvalds */
211da177e4SLinus Torvalds
221da177e4SLinus Torvalds #include <linux/kernel.h>
23f361bf4aSIngo Molnar #include <linux/sched/signal.h>
241da177e4SLinus Torvalds #include <linux/syscalls.h>
251da177e4SLinus Torvalds #include <linux/fs.h>
26ae259a9cSChristoph Hellwig #include <linux/iomap.h>
271da177e4SLinus Torvalds #include <linux/mm.h>
281da177e4SLinus Torvalds #include <linux/percpu.h>
291da177e4SLinus Torvalds #include <linux/slab.h>
3016f7e0feSRandy Dunlap #include <linux/capability.h>
311da177e4SLinus Torvalds #include <linux/blkdev.h>
321da177e4SLinus Torvalds #include <linux/file.h>
331da177e4SLinus Torvalds #include <linux/quotaops.h>
341da177e4SLinus Torvalds #include <linux/highmem.h>
35630d9c47SPaul Gortmaker #include <linux/export.h>
36bafc0dbaSTejun Heo #include <linux/backing-dev.h>
371da177e4SLinus Torvalds #include <linux/writeback.h>
381da177e4SLinus Torvalds #include <linux/hash.h>
391da177e4SLinus Torvalds #include <linux/suspend.h>
401da177e4SLinus Torvalds #include <linux/buffer_head.h>
4155e829afSAndrew Morton #include <linux/task_io_accounting_ops.h>
421da177e4SLinus Torvalds #include <linux/bio.h>
431da177e4SLinus Torvalds #include <linux/cpu.h>
441da177e4SLinus Torvalds #include <linux/bitops.h>
451da177e4SLinus Torvalds #include <linux/mpage.h>
46fb1c8f93SIngo Molnar #include <linux/bit_spinlock.h>
4729f3ad7dSJan Kara #include <linux/pagevec.h>
48f745c6f5SShakeel Butt #include <linux/sched/mm.h>
495305cb83STejun Heo #include <trace/events/block.h>
5031fb992cSEric Biggers #include <linux/fscrypt.h>
514fa512ceSEric Biggers #include <linux/fsverity.h>
528a237adfSMarcelo Tosatti #include <linux/sched/isolation.h>
531da177e4SLinus Torvalds
542b211dc0SBen Dooks #include "internal.h"
552b211dc0SBen Dooks
561da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
575bdf402aSRitesh Harjani (IBM) static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
581420c4a5SBart Van Assche struct writeback_control *wbc);
591da177e4SLinus Torvalds
601da177e4SLinus Torvalds #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
611da177e4SLinus Torvalds
touch_buffer(struct buffer_head * bh)62f0059afdSTejun Heo inline void touch_buffer(struct buffer_head *bh)
63f0059afdSTejun Heo {
645305cb83STejun Heo trace_block_touch_buffer(bh);
6503c5f331SMatthew Wilcox (Oracle) folio_mark_accessed(bh->b_folio);
66f0059afdSTejun Heo }
67f0059afdSTejun Heo EXPORT_SYMBOL(touch_buffer);
68f0059afdSTejun Heo
__lock_buffer(struct buffer_head * bh)69fc9b52cdSHarvey Harrison void __lock_buffer(struct buffer_head *bh)
701da177e4SLinus Torvalds {
7174316201SNeilBrown wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
721da177e4SLinus Torvalds }
731da177e4SLinus Torvalds EXPORT_SYMBOL(__lock_buffer);
741da177e4SLinus Torvalds
unlock_buffer(struct buffer_head * bh)75fc9b52cdSHarvey Harrison void unlock_buffer(struct buffer_head *bh)
761da177e4SLinus Torvalds {
7751b07fc3SNick Piggin clear_bit_unlock(BH_Lock, &bh->b_state);
784e857c58SPeter Zijlstra smp_mb__after_atomic();
791da177e4SLinus Torvalds wake_up_bit(&bh->b_state, BH_Lock);
801da177e4SLinus Torvalds }
811fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(unlock_buffer);
821da177e4SLinus Torvalds
831da177e4SLinus Torvalds /*
84520f301cSMatthew Wilcox (Oracle) * Returns if the folio has dirty or writeback buffers. If all the buffers
85520f301cSMatthew Wilcox (Oracle) * are unlocked and clean then the folio_test_dirty information is stale. If
86520f301cSMatthew Wilcox (Oracle) * any of the buffers are locked, it is assumed they are locked for IO.
87b4597226SMel Gorman */
buffer_check_dirty_writeback(struct folio * folio,bool * dirty,bool * writeback)88520f301cSMatthew Wilcox (Oracle) void buffer_check_dirty_writeback(struct folio *folio,
89b4597226SMel Gorman bool *dirty, bool *writeback)
90b4597226SMel Gorman {
91b4597226SMel Gorman struct buffer_head *head, *bh;
92b4597226SMel Gorman *dirty = false;
93b4597226SMel Gorman *writeback = false;
94b4597226SMel Gorman
95520f301cSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio));
96b4597226SMel Gorman
97520f301cSMatthew Wilcox (Oracle) head = folio_buffers(folio);
98520f301cSMatthew Wilcox (Oracle) if (!head)
99b4597226SMel Gorman return;
100b4597226SMel Gorman
101520f301cSMatthew Wilcox (Oracle) if (folio_test_writeback(folio))
102b4597226SMel Gorman *writeback = true;
103b4597226SMel Gorman
104b4597226SMel Gorman bh = head;
105b4597226SMel Gorman do {
106b4597226SMel Gorman if (buffer_locked(bh))
107b4597226SMel Gorman *writeback = true;
108b4597226SMel Gorman
109b4597226SMel Gorman if (buffer_dirty(bh))
110b4597226SMel Gorman *dirty = true;
111b4597226SMel Gorman
112b4597226SMel Gorman bh = bh->b_this_page;
113b4597226SMel Gorman } while (bh != head);
114b4597226SMel Gorman }
115b4597226SMel Gorman
116b4597226SMel Gorman /*
1171da177e4SLinus Torvalds * Block until a buffer comes unlocked. This doesn't stop it
1181da177e4SLinus Torvalds * from becoming locked again - you have to lock it yourself
1191da177e4SLinus Torvalds * if you want to preserve its state.
1201da177e4SLinus Torvalds */
__wait_on_buffer(struct buffer_head * bh)1211da177e4SLinus Torvalds void __wait_on_buffer(struct buffer_head * bh)
1221da177e4SLinus Torvalds {
12374316201SNeilBrown wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
1241da177e4SLinus Torvalds }
1251fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__wait_on_buffer);
1261da177e4SLinus Torvalds
buffer_io_error(struct buffer_head * bh,char * msg)127b744c2acSRobert Elliott static void buffer_io_error(struct buffer_head *bh, char *msg)
1281da177e4SLinus Torvalds {
129432f16e6SRobert Elliott if (!test_bit(BH_Quiet, &bh->b_state))
130432f16e6SRobert Elliott printk_ratelimited(KERN_ERR
131a1c6f057SDmitry Monakhov "Buffer I/O error on dev %pg, logical block %llu%s\n",
132a1c6f057SDmitry Monakhov bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
1331da177e4SLinus Torvalds }
1341da177e4SLinus Torvalds
1351da177e4SLinus Torvalds /*
13668671f35SDmitry Monakhov * End-of-IO handler helper function which does not touch the bh after
13768671f35SDmitry Monakhov * unlocking it.
13868671f35SDmitry Monakhov * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
13968671f35SDmitry Monakhov * a race there is benign: unlock_buffer() only use the bh's address for
14068671f35SDmitry Monakhov * hashing after unlocking the buffer, so it doesn't actually touch the bh
14168671f35SDmitry Monakhov * itself.
1421da177e4SLinus Torvalds */
__end_buffer_read_notouch(struct buffer_head * bh,int uptodate)14368671f35SDmitry Monakhov static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
1441da177e4SLinus Torvalds {
1451da177e4SLinus Torvalds if (uptodate) {
1461da177e4SLinus Torvalds set_buffer_uptodate(bh);
1471da177e4SLinus Torvalds } else {
14870246286SChristoph Hellwig /* This happens, due to failed read-ahead attempts. */
1491da177e4SLinus Torvalds clear_buffer_uptodate(bh);
1501da177e4SLinus Torvalds }
1511da177e4SLinus Torvalds unlock_buffer(bh);
15268671f35SDmitry Monakhov }
15368671f35SDmitry Monakhov
15468671f35SDmitry Monakhov /*
15568671f35SDmitry Monakhov * Default synchronous end-of-IO handler.. Just mark it up-to-date and
15679f59784SZhang Yi * unlock the buffer.
15768671f35SDmitry Monakhov */
end_buffer_read_sync(struct buffer_head * bh,int uptodate)15868671f35SDmitry Monakhov void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
15968671f35SDmitry Monakhov {
16068671f35SDmitry Monakhov __end_buffer_read_notouch(bh, uptodate);
1611da177e4SLinus Torvalds put_bh(bh);
1621da177e4SLinus Torvalds }
1631fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_read_sync);
1641da177e4SLinus Torvalds
end_buffer_write_sync(struct buffer_head * bh,int uptodate)1651da177e4SLinus Torvalds void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
1661da177e4SLinus Torvalds {
1671da177e4SLinus Torvalds if (uptodate) {
1681da177e4SLinus Torvalds set_buffer_uptodate(bh);
1691da177e4SLinus Torvalds } else {
170b744c2acSRobert Elliott buffer_io_error(bh, ", lost sync page write");
17187354e5dSJeff Layton mark_buffer_write_io_error(bh);
1721da177e4SLinus Torvalds clear_buffer_uptodate(bh);
1731da177e4SLinus Torvalds }
1741da177e4SLinus Torvalds unlock_buffer(bh);
1751da177e4SLinus Torvalds put_bh(bh);
1761da177e4SLinus Torvalds }
1771fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_write_sync);
1781da177e4SLinus Torvalds
1791da177e4SLinus Torvalds /*
1801da177e4SLinus Torvalds * Various filesystems appear to want __find_get_block to be non-blocking.
1811da177e4SLinus Torvalds * But it's the page lock which protects the buffers. To get around this,
1821da177e4SLinus Torvalds * we get exclusion from try_to_free_buffers with the blockdev mapping's
1831da177e4SLinus Torvalds * private_lock.
1841da177e4SLinus Torvalds *
185b93b0163SMatthew Wilcox * Hack idea: for the blockdev mapping, private_lock contention
1861da177e4SLinus Torvalds * may be quite high. This code could TryLock the page, and if that
187b93b0163SMatthew Wilcox * succeeds, there is no need to take private_lock.
1881da177e4SLinus Torvalds */
1891da177e4SLinus Torvalds static struct buffer_head *
__find_get_block_slow(struct block_device * bdev,sector_t block)190385fd4c5SCoywolf Qi Hunt __find_get_block_slow(struct block_device *bdev, sector_t block)
1911da177e4SLinus Torvalds {
1921da177e4SLinus Torvalds struct inode *bd_inode = bdev->bd_inode;
1931da177e4SLinus Torvalds struct address_space *bd_mapping = bd_inode->i_mapping;
1941da177e4SLinus Torvalds struct buffer_head *ret = NULL;
1951da177e4SLinus Torvalds pgoff_t index;
1961da177e4SLinus Torvalds struct buffer_head *bh;
1971da177e4SLinus Torvalds struct buffer_head *head;
198eee25182SMatthew Wilcox (Oracle) struct folio *folio;
1991da177e4SLinus Torvalds int all_mapped = 1;
20043636c80STetsuo Handa static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
2011da177e4SLinus Torvalds
20209cbfeafSKirill A. Shutemov index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
203eee25182SMatthew Wilcox (Oracle) folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
204eee25182SMatthew Wilcox (Oracle) if (IS_ERR(folio))
2051da177e4SLinus Torvalds goto out;
2061da177e4SLinus Torvalds
2071da177e4SLinus Torvalds spin_lock(&bd_mapping->private_lock);
208eee25182SMatthew Wilcox (Oracle) head = folio_buffers(folio);
209eee25182SMatthew Wilcox (Oracle) if (!head)
2101da177e4SLinus Torvalds goto out_unlock;
2111da177e4SLinus Torvalds bh = head;
2121da177e4SLinus Torvalds do {
21397f76d3dSNikanth Karthikesan if (!buffer_mapped(bh))
21497f76d3dSNikanth Karthikesan all_mapped = 0;
21597f76d3dSNikanth Karthikesan else if (bh->b_blocknr == block) {
2161da177e4SLinus Torvalds ret = bh;
2171da177e4SLinus Torvalds get_bh(bh);
2181da177e4SLinus Torvalds goto out_unlock;
2191da177e4SLinus Torvalds }
2201da177e4SLinus Torvalds bh = bh->b_this_page;
2211da177e4SLinus Torvalds } while (bh != head);
2221da177e4SLinus Torvalds
2231da177e4SLinus Torvalds /* we might be here because some of the buffers on this page are
2241da177e4SLinus Torvalds * not mapped. This is due to various races between
2251da177e4SLinus Torvalds * file io on the block device and getblk. It gets dealt with
2261da177e4SLinus Torvalds * elsewhere, don't buffer_error if we had some unmapped buffers
2271da177e4SLinus Torvalds */
22843636c80STetsuo Handa ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
22943636c80STetsuo Handa if (all_mapped && __ratelimit(&last_warned)) {
23043636c80STetsuo Handa printk("__find_get_block_slow() failed. block=%llu, "
23143636c80STetsuo Handa "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
23243636c80STetsuo Handa "device %pg blocksize: %d\n",
233205f87f6SBadari Pulavarty (unsigned long long)block,
23443636c80STetsuo Handa (unsigned long long)bh->b_blocknr,
23543636c80STetsuo Handa bh->b_state, bh->b_size, bdev,
23672a2ebd8STao Ma 1 << bd_inode->i_blkbits);
2371da177e4SLinus Torvalds }
2381da177e4SLinus Torvalds out_unlock:
2391da177e4SLinus Torvalds spin_unlock(&bd_mapping->private_lock);
240eee25182SMatthew Wilcox (Oracle) folio_put(folio);
2411da177e4SLinus Torvalds out:
2421da177e4SLinus Torvalds return ret;
2431da177e4SLinus Torvalds }
2441da177e4SLinus Torvalds
end_buffer_async_read(struct buffer_head * bh,int uptodate)2451da177e4SLinus Torvalds static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
2461da177e4SLinus Torvalds {
2471da177e4SLinus Torvalds unsigned long flags;
248a3972203SNick Piggin struct buffer_head *first;
2491da177e4SLinus Torvalds struct buffer_head *tmp;
2502e2dba15SMatthew Wilcox (Oracle) struct folio *folio;
2512e2dba15SMatthew Wilcox (Oracle) int folio_uptodate = 1;
2521da177e4SLinus Torvalds
2531da177e4SLinus Torvalds BUG_ON(!buffer_async_read(bh));
2541da177e4SLinus Torvalds
2552e2dba15SMatthew Wilcox (Oracle) folio = bh->b_folio;
2561da177e4SLinus Torvalds if (uptodate) {
2571da177e4SLinus Torvalds set_buffer_uptodate(bh);
2581da177e4SLinus Torvalds } else {
2591da177e4SLinus Torvalds clear_buffer_uptodate(bh);
260b744c2acSRobert Elliott buffer_io_error(bh, ", async page read");
2612e2dba15SMatthew Wilcox (Oracle) folio_set_error(folio);
2621da177e4SLinus Torvalds }
2631da177e4SLinus Torvalds
2641da177e4SLinus Torvalds /*
2651da177e4SLinus Torvalds * Be _very_ careful from here on. Bad things can happen if
2661da177e4SLinus Torvalds * two buffer heads end IO at almost the same time and both
2671da177e4SLinus Torvalds * decide that the page is now completely done.
2681da177e4SLinus Torvalds */
2692e2dba15SMatthew Wilcox (Oracle) first = folio_buffers(folio);
270f1e67e35SThomas Gleixner spin_lock_irqsave(&first->b_uptodate_lock, flags);
2711da177e4SLinus Torvalds clear_buffer_async_read(bh);
2721da177e4SLinus Torvalds unlock_buffer(bh);
2731da177e4SLinus Torvalds tmp = bh;
2741da177e4SLinus Torvalds do {
2751da177e4SLinus Torvalds if (!buffer_uptodate(tmp))
2762e2dba15SMatthew Wilcox (Oracle) folio_uptodate = 0;
2771da177e4SLinus Torvalds if (buffer_async_read(tmp)) {
2781da177e4SLinus Torvalds BUG_ON(!buffer_locked(tmp));
2791da177e4SLinus Torvalds goto still_busy;
2801da177e4SLinus Torvalds }
2811da177e4SLinus Torvalds tmp = tmp->b_this_page;
2821da177e4SLinus Torvalds } while (tmp != bh);
283f1e67e35SThomas Gleixner spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
2841da177e4SLinus Torvalds
2851da177e4SLinus Torvalds /*
2866e8e79fcSMatthew Wilcox (Oracle) * If all of the buffers are uptodate then we can set the page
2876e8e79fcSMatthew Wilcox (Oracle) * uptodate.
2881da177e4SLinus Torvalds */
2892e2dba15SMatthew Wilcox (Oracle) if (folio_uptodate)
2902e2dba15SMatthew Wilcox (Oracle) folio_mark_uptodate(folio);
2912e2dba15SMatthew Wilcox (Oracle) folio_unlock(folio);
2921da177e4SLinus Torvalds return;
2931da177e4SLinus Torvalds
2941da177e4SLinus Torvalds still_busy:
295f1e67e35SThomas Gleixner spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
2961da177e4SLinus Torvalds return;
2971da177e4SLinus Torvalds }
2981da177e4SLinus Torvalds
2994fa512ceSEric Biggers struct postprocess_bh_ctx {
30031fb992cSEric Biggers struct work_struct work;
30131fb992cSEric Biggers struct buffer_head *bh;
30231fb992cSEric Biggers };
30331fb992cSEric Biggers
verify_bh(struct work_struct * work)3044fa512ceSEric Biggers static void verify_bh(struct work_struct *work)
3054fa512ceSEric Biggers {
3064fa512ceSEric Biggers struct postprocess_bh_ctx *ctx =
3074fa512ceSEric Biggers container_of(work, struct postprocess_bh_ctx, work);
3084fa512ceSEric Biggers struct buffer_head *bh = ctx->bh;
3094fa512ceSEric Biggers bool valid;
3104fa512ceSEric Biggers
3118b7d3fe9SEric Biggers valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
3124fa512ceSEric Biggers end_buffer_async_read(bh, valid);
3134fa512ceSEric Biggers kfree(ctx);
3144fa512ceSEric Biggers }
3154fa512ceSEric Biggers
need_fsverity(struct buffer_head * bh)3164fa512ceSEric Biggers static bool need_fsverity(struct buffer_head *bh)
3174fa512ceSEric Biggers {
3188b7d3fe9SEric Biggers struct folio *folio = bh->b_folio;
3198b7d3fe9SEric Biggers struct inode *inode = folio->mapping->host;
3204fa512ceSEric Biggers
3214fa512ceSEric Biggers return fsverity_active(inode) &&
3224fa512ceSEric Biggers /* needed by ext4 */
3238b7d3fe9SEric Biggers folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
3244fa512ceSEric Biggers }
3254fa512ceSEric Biggers
decrypt_bh(struct work_struct * work)32631fb992cSEric Biggers static void decrypt_bh(struct work_struct *work)
32731fb992cSEric Biggers {
3284fa512ceSEric Biggers struct postprocess_bh_ctx *ctx =
3294fa512ceSEric Biggers container_of(work, struct postprocess_bh_ctx, work);
33031fb992cSEric Biggers struct buffer_head *bh = ctx->bh;
33131fb992cSEric Biggers int err;
33231fb992cSEric Biggers
3339c7fb7f7SEric Biggers err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
3349c7fb7f7SEric Biggers bh_offset(bh));
3354fa512ceSEric Biggers if (err == 0 && need_fsverity(bh)) {
3364fa512ceSEric Biggers /*
3374fa512ceSEric Biggers * We use different work queues for decryption and for verity
3384fa512ceSEric Biggers * because verity may require reading metadata pages that need
3394fa512ceSEric Biggers * decryption, and we shouldn't recurse to the same workqueue.
3404fa512ceSEric Biggers */
3414fa512ceSEric Biggers INIT_WORK(&ctx->work, verify_bh);
3424fa512ceSEric Biggers fsverity_enqueue_verify_work(&ctx->work);
3434fa512ceSEric Biggers return;
3444fa512ceSEric Biggers }
34531fb992cSEric Biggers end_buffer_async_read(bh, err == 0);
34631fb992cSEric Biggers kfree(ctx);
34731fb992cSEric Biggers }
34831fb992cSEric Biggers
34931fb992cSEric Biggers /*
3502c69e205SMatthew Wilcox (Oracle) * I/O completion handler for block_read_full_folio() - pages
35131fb992cSEric Biggers * which come unlocked at the end of I/O.
35231fb992cSEric Biggers */
end_buffer_async_read_io(struct buffer_head * bh,int uptodate)35331fb992cSEric Biggers static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
35431fb992cSEric Biggers {
3553822a7c4SLinus Torvalds struct inode *inode = bh->b_folio->mapping->host;
3564fa512ceSEric Biggers bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
3574fa512ceSEric Biggers bool verify = need_fsverity(bh);
3584fa512ceSEric Biggers
3594fa512ceSEric Biggers /* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
3604fa512ceSEric Biggers if (uptodate && (decrypt || verify)) {
3614fa512ceSEric Biggers struct postprocess_bh_ctx *ctx =
3624fa512ceSEric Biggers kmalloc(sizeof(*ctx), GFP_ATOMIC);
36331fb992cSEric Biggers
36431fb992cSEric Biggers if (ctx) {
36531fb992cSEric Biggers ctx->bh = bh;
3664fa512ceSEric Biggers if (decrypt) {
3674fa512ceSEric Biggers INIT_WORK(&ctx->work, decrypt_bh);
36831fb992cSEric Biggers fscrypt_enqueue_decrypt_work(&ctx->work);
3694fa512ceSEric Biggers } else {
3704fa512ceSEric Biggers INIT_WORK(&ctx->work, verify_bh);
3714fa512ceSEric Biggers fsverity_enqueue_verify_work(&ctx->work);
3724fa512ceSEric Biggers }
37331fb992cSEric Biggers return;
37431fb992cSEric Biggers }
37531fb992cSEric Biggers uptodate = 0;
37631fb992cSEric Biggers }
37731fb992cSEric Biggers end_buffer_async_read(bh, uptodate);
37831fb992cSEric Biggers }
37931fb992cSEric Biggers
3801da177e4SLinus Torvalds /*
3811da177e4SLinus Torvalds * Completion handler for block_write_full_page() - pages which are unlocked
3821da177e4SLinus Torvalds * during I/O, and which have PageWriteback cleared upon I/O completion.
3831da177e4SLinus Torvalds */
end_buffer_async_write(struct buffer_head * bh,int uptodate)38435c80d5fSChris Mason void end_buffer_async_write(struct buffer_head *bh, int uptodate)
3851da177e4SLinus Torvalds {
3861da177e4SLinus Torvalds unsigned long flags;
387a3972203SNick Piggin struct buffer_head *first;
3881da177e4SLinus Torvalds struct buffer_head *tmp;
389743ed81eSMatthew Wilcox (Oracle) struct folio *folio;
3901da177e4SLinus Torvalds
3911da177e4SLinus Torvalds BUG_ON(!buffer_async_write(bh));
3921da177e4SLinus Torvalds
393743ed81eSMatthew Wilcox (Oracle) folio = bh->b_folio;
3941da177e4SLinus Torvalds if (uptodate) {
3951da177e4SLinus Torvalds set_buffer_uptodate(bh);
3961da177e4SLinus Torvalds } else {
397b744c2acSRobert Elliott buffer_io_error(bh, ", lost async page write");
39887354e5dSJeff Layton mark_buffer_write_io_error(bh);
3991da177e4SLinus Torvalds clear_buffer_uptodate(bh);
400743ed81eSMatthew Wilcox (Oracle) folio_set_error(folio);
4011da177e4SLinus Torvalds }
4021da177e4SLinus Torvalds
403743ed81eSMatthew Wilcox (Oracle) first = folio_buffers(folio);
404f1e67e35SThomas Gleixner spin_lock_irqsave(&first->b_uptodate_lock, flags);
405a3972203SNick Piggin
4061da177e4SLinus Torvalds clear_buffer_async_write(bh);
4071da177e4SLinus Torvalds unlock_buffer(bh);
4081da177e4SLinus Torvalds tmp = bh->b_this_page;
4091da177e4SLinus Torvalds while (tmp != bh) {
4101da177e4SLinus Torvalds if (buffer_async_write(tmp)) {
4111da177e4SLinus Torvalds BUG_ON(!buffer_locked(tmp));
4121da177e4SLinus Torvalds goto still_busy;
4131da177e4SLinus Torvalds }
4141da177e4SLinus Torvalds tmp = tmp->b_this_page;
4151da177e4SLinus Torvalds }
416f1e67e35SThomas Gleixner spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
417743ed81eSMatthew Wilcox (Oracle) folio_end_writeback(folio);
4181da177e4SLinus Torvalds return;
4191da177e4SLinus Torvalds
4201da177e4SLinus Torvalds still_busy:
421f1e67e35SThomas Gleixner spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
4221da177e4SLinus Torvalds return;
4231da177e4SLinus Torvalds }
4241fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(end_buffer_async_write);
4251da177e4SLinus Torvalds
4261da177e4SLinus Torvalds /*
4271da177e4SLinus Torvalds * If a page's buffers are under async readin (end_buffer_async_read
4281da177e4SLinus Torvalds * completion) then there is a possibility that another thread of
4291da177e4SLinus Torvalds * control could lock one of the buffers after it has completed
4301da177e4SLinus Torvalds * but while some of the other buffers have not completed. This
4311da177e4SLinus Torvalds * locked buffer would confuse end_buffer_async_read() into not unlocking
4321da177e4SLinus Torvalds * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
4331da177e4SLinus Torvalds * that this buffer is not under async I/O.
4341da177e4SLinus Torvalds *
4351da177e4SLinus Torvalds * The page comes unlocked when it has no locked buffer_async buffers
4361da177e4SLinus Torvalds * left.
4371da177e4SLinus Torvalds *
4381da177e4SLinus Torvalds * PageLocked prevents anyone starting new async I/O reads any of
4391da177e4SLinus Torvalds * the buffers.
4401da177e4SLinus Torvalds *
4411da177e4SLinus Torvalds * PageWriteback is used to prevent simultaneous writeout of the same
4421da177e4SLinus Torvalds * page.
4431da177e4SLinus Torvalds *
4441da177e4SLinus Torvalds * PageLocked prevents anyone from starting writeback of a page which is
4451da177e4SLinus Torvalds * under read I/O (PageWriteback is only ever set against a locked page).
4461da177e4SLinus Torvalds */
mark_buffer_async_read(struct buffer_head * bh)4471da177e4SLinus Torvalds static void mark_buffer_async_read(struct buffer_head *bh)
4481da177e4SLinus Torvalds {
44931fb992cSEric Biggers bh->b_end_io = end_buffer_async_read_io;
4501da177e4SLinus Torvalds set_buffer_async_read(bh);
4511da177e4SLinus Torvalds }
4521da177e4SLinus Torvalds
mark_buffer_async_write_endio(struct buffer_head * bh,bh_end_io_t * handler)4531fe72eaaSH Hartley Sweeten static void mark_buffer_async_write_endio(struct buffer_head *bh,
45435c80d5fSChris Mason bh_end_io_t *handler)
45535c80d5fSChris Mason {
45635c80d5fSChris Mason bh->b_end_io = handler;
45735c80d5fSChris Mason set_buffer_async_write(bh);
45835c80d5fSChris Mason }
45935c80d5fSChris Mason
mark_buffer_async_write(struct buffer_head * bh)4601da177e4SLinus Torvalds void mark_buffer_async_write(struct buffer_head *bh)
4611da177e4SLinus Torvalds {
46235c80d5fSChris Mason mark_buffer_async_write_endio(bh, end_buffer_async_write);
4631da177e4SLinus Torvalds }
4641da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_async_write);
4651da177e4SLinus Torvalds
4661da177e4SLinus Torvalds
4671da177e4SLinus Torvalds /*
4681da177e4SLinus Torvalds * fs/buffer.c contains helper functions for buffer-backed address space's
4691da177e4SLinus Torvalds * fsync functions. A common requirement for buffer-based filesystems is
4701da177e4SLinus Torvalds * that certain data from the backing blockdev needs to be written out for
4711da177e4SLinus Torvalds * a successful fsync(). For example, ext2 indirect blocks need to be
4721da177e4SLinus Torvalds * written back and waited upon before fsync() returns.
4731da177e4SLinus Torvalds *
4741da177e4SLinus Torvalds * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
4751da177e4SLinus Torvalds * inode_has_buffers() and invalidate_inode_buffers() are provided for the
4761da177e4SLinus Torvalds * management of a list of dependent buffers at ->i_mapping->private_list.
4771da177e4SLinus Torvalds *
4781da177e4SLinus Torvalds * Locking is a little subtle: try_to_free_buffers() will remove buffers
4791da177e4SLinus Torvalds * from their controlling inode's queue when they are being freed. But
4801da177e4SLinus Torvalds * try_to_free_buffers() will be operating against the *blockdev* mapping
4811da177e4SLinus Torvalds * at the time, not against the S_ISREG file which depends on those buffers.
4821da177e4SLinus Torvalds * So the locking for private_list is via the private_lock in the address_space
4831da177e4SLinus Torvalds * which backs the buffers. Which is different from the address_space
4841da177e4SLinus Torvalds * against which the buffers are listed. So for a particular address_space,
4851da177e4SLinus Torvalds * mapping->private_lock does *not* protect mapping->private_list! In fact,
4861da177e4SLinus Torvalds * mapping->private_list will always be protected by the backing blockdev's
4871da177e4SLinus Torvalds * ->private_lock.
4881da177e4SLinus Torvalds *
4891da177e4SLinus Torvalds * Which introduces a requirement: all buffers on an address_space's
4901da177e4SLinus Torvalds * ->private_list must be from the same address_space: the blockdev's.
4911da177e4SLinus Torvalds *
4921da177e4SLinus Torvalds * address_spaces which do not place buffers at ->private_list via these
4931da177e4SLinus Torvalds * utility functions are free to use private_lock and private_list for
4941da177e4SLinus Torvalds * whatever they want. The only requirement is that list_empty(private_list)
4951da177e4SLinus Torvalds * be true at clear_inode() time.
4961da177e4SLinus Torvalds *
4971da177e4SLinus Torvalds * FIXME: clear_inode should not call invalidate_inode_buffers(). The
4981da177e4SLinus Torvalds * filesystems should do that. invalidate_inode_buffers() should just go
4991da177e4SLinus Torvalds * BUG_ON(!list_empty).
5001da177e4SLinus Torvalds *
5011da177e4SLinus Torvalds * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
5021da177e4SLinus Torvalds * take an address_space, not an inode. And it should be called
5031da177e4SLinus Torvalds * mark_buffer_dirty_fsync() to clearly define why those buffers are being
5041da177e4SLinus Torvalds * queued up.
5051da177e4SLinus Torvalds *
5061da177e4SLinus Torvalds * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
5071da177e4SLinus Torvalds * list if it is already on a list. Because if the buffer is on a list,
5081da177e4SLinus Torvalds * it *must* already be on the right one. If not, the filesystem is being
5091da177e4SLinus Torvalds * silly. This will save a ton of locking. But first we have to ensure
5101da177e4SLinus Torvalds * that buffers are taken *off* the old inode's list when they are freed
5111da177e4SLinus Torvalds * (presumably in truncate). That requires careful auditing of all
5121da177e4SLinus Torvalds * filesystems (do it inside bforget()). It could also be done by bringing
5131da177e4SLinus Torvalds * b_inode back.
5141da177e4SLinus Torvalds */
5151da177e4SLinus Torvalds
5161da177e4SLinus Torvalds /*
5171da177e4SLinus Torvalds * The buffer's backing address_space's private_lock must be held
5181da177e4SLinus Torvalds */
__remove_assoc_queue(struct buffer_head * bh)519dbacefc9SThomas Petazzoni static void __remove_assoc_queue(struct buffer_head *bh)
5201da177e4SLinus Torvalds {
5211da177e4SLinus Torvalds list_del_init(&bh->b_assoc_buffers);
52258ff407bSJan Kara WARN_ON(!bh->b_assoc_map);
52358ff407bSJan Kara bh->b_assoc_map = NULL;
5241da177e4SLinus Torvalds }
5251da177e4SLinus Torvalds
inode_has_buffers(struct inode * inode)5261da177e4SLinus Torvalds int inode_has_buffers(struct inode *inode)
5271da177e4SLinus Torvalds {
5281da177e4SLinus Torvalds return !list_empty(&inode->i_data.private_list);
5291da177e4SLinus Torvalds }
5301da177e4SLinus Torvalds
5311da177e4SLinus Torvalds /*
5321da177e4SLinus Torvalds * osync is designed to support O_SYNC io. It waits synchronously for
5331da177e4SLinus Torvalds * all already-submitted IO to complete, but does not queue any new
5341da177e4SLinus Torvalds * writes to the disk.
5351da177e4SLinus Torvalds *
53679f59784SZhang Yi * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
53779f59784SZhang Yi * as you dirty the buffers, and then use osync_inode_buffers to wait for
5381da177e4SLinus Torvalds * completion. Any other dirty buffers which are not yet queued for
5391da177e4SLinus Torvalds * write will not be flushed to disk by the osync.
5401da177e4SLinus Torvalds */
osync_buffers_list(spinlock_t * lock,struct list_head * list)5411da177e4SLinus Torvalds static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
5421da177e4SLinus Torvalds {
5431da177e4SLinus Torvalds struct buffer_head *bh;
5441da177e4SLinus Torvalds struct list_head *p;
5451da177e4SLinus Torvalds int err = 0;
5461da177e4SLinus Torvalds
5471da177e4SLinus Torvalds spin_lock(lock);
5481da177e4SLinus Torvalds repeat:
5491da177e4SLinus Torvalds list_for_each_prev(p, list) {
5501da177e4SLinus Torvalds bh = BH_ENTRY(p);
5511da177e4SLinus Torvalds if (buffer_locked(bh)) {
5521da177e4SLinus Torvalds get_bh(bh);
5531da177e4SLinus Torvalds spin_unlock(lock);
5541da177e4SLinus Torvalds wait_on_buffer(bh);
5551da177e4SLinus Torvalds if (!buffer_uptodate(bh))
5561da177e4SLinus Torvalds err = -EIO;
5571da177e4SLinus Torvalds brelse(bh);
5581da177e4SLinus Torvalds spin_lock(lock);
5591da177e4SLinus Torvalds goto repeat;
5601da177e4SLinus Torvalds }
5611da177e4SLinus Torvalds }
5621da177e4SLinus Torvalds spin_unlock(lock);
5631da177e4SLinus Torvalds return err;
5641da177e4SLinus Torvalds }
5651da177e4SLinus Torvalds
5661da177e4SLinus Torvalds /**
56778a4a50aSRandy Dunlap * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
56867be2dd1SMartin Waitz * @mapping: the mapping which wants those buffers written
5691da177e4SLinus Torvalds *
5701da177e4SLinus Torvalds * Starts I/O against the buffers at mapping->private_list, and waits upon
5711da177e4SLinus Torvalds * that I/O.
5721da177e4SLinus Torvalds *
57367be2dd1SMartin Waitz * Basically, this is a convenience function for fsync().
57467be2dd1SMartin Waitz * @mapping is a file or directory which needs those buffers to be written for
57567be2dd1SMartin Waitz * a successful fsync().
5761da177e4SLinus Torvalds */
sync_mapping_buffers(struct address_space * mapping)5771da177e4SLinus Torvalds int sync_mapping_buffers(struct address_space *mapping)
5781da177e4SLinus Torvalds {
579252aa6f5SRafael Aquini struct address_space *buffer_mapping = mapping->private_data;
5801da177e4SLinus Torvalds
5811da177e4SLinus Torvalds if (buffer_mapping == NULL || list_empty(&mapping->private_list))
5821da177e4SLinus Torvalds return 0;
5831da177e4SLinus Torvalds
5841da177e4SLinus Torvalds return fsync_buffers_list(&buffer_mapping->private_lock,
5851da177e4SLinus Torvalds &mapping->private_list);
5861da177e4SLinus Torvalds }
5871da177e4SLinus Torvalds EXPORT_SYMBOL(sync_mapping_buffers);
5881da177e4SLinus Torvalds
58931b2ebc0SRitesh Harjani (IBM) /**
59031b2ebc0SRitesh Harjani (IBM) * generic_buffers_fsync_noflush - generic buffer fsync implementation
59131b2ebc0SRitesh Harjani (IBM) * for simple filesystems with no inode lock
59231b2ebc0SRitesh Harjani (IBM) *
59331b2ebc0SRitesh Harjani (IBM) * @file: file to synchronize
59431b2ebc0SRitesh Harjani (IBM) * @start: start offset in bytes
59531b2ebc0SRitesh Harjani (IBM) * @end: end offset in bytes (inclusive)
59631b2ebc0SRitesh Harjani (IBM) * @datasync: only synchronize essential metadata if true
59731b2ebc0SRitesh Harjani (IBM) *
59831b2ebc0SRitesh Harjani (IBM) * This is a generic implementation of the fsync method for simple
59931b2ebc0SRitesh Harjani (IBM) * filesystems which track all non-inode metadata in the buffers list
60031b2ebc0SRitesh Harjani (IBM) * hanging off the address_space structure.
60131b2ebc0SRitesh Harjani (IBM) */
generic_buffers_fsync_noflush(struct file * file,loff_t start,loff_t end,bool datasync)60231b2ebc0SRitesh Harjani (IBM) int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
60331b2ebc0SRitesh Harjani (IBM) bool datasync)
60431b2ebc0SRitesh Harjani (IBM) {
60531b2ebc0SRitesh Harjani (IBM) struct inode *inode = file->f_mapping->host;
60631b2ebc0SRitesh Harjani (IBM) int err;
60731b2ebc0SRitesh Harjani (IBM) int ret;
60831b2ebc0SRitesh Harjani (IBM)
60931b2ebc0SRitesh Harjani (IBM) err = file_write_and_wait_range(file, start, end);
61031b2ebc0SRitesh Harjani (IBM) if (err)
61131b2ebc0SRitesh Harjani (IBM) return err;
61231b2ebc0SRitesh Harjani (IBM)
61331b2ebc0SRitesh Harjani (IBM) ret = sync_mapping_buffers(inode->i_mapping);
61431b2ebc0SRitesh Harjani (IBM) if (!(inode->i_state & I_DIRTY_ALL))
61531b2ebc0SRitesh Harjani (IBM) goto out;
61631b2ebc0SRitesh Harjani (IBM) if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
61731b2ebc0SRitesh Harjani (IBM) goto out;
61831b2ebc0SRitesh Harjani (IBM)
61931b2ebc0SRitesh Harjani (IBM) err = sync_inode_metadata(inode, 1);
62031b2ebc0SRitesh Harjani (IBM) if (ret == 0)
62131b2ebc0SRitesh Harjani (IBM) ret = err;
62231b2ebc0SRitesh Harjani (IBM)
62331b2ebc0SRitesh Harjani (IBM) out:
62431b2ebc0SRitesh Harjani (IBM) /* check and advance again to catch errors after syncing out buffers */
62531b2ebc0SRitesh Harjani (IBM) err = file_check_and_advance_wb_err(file);
62631b2ebc0SRitesh Harjani (IBM) if (ret == 0)
62731b2ebc0SRitesh Harjani (IBM) ret = err;
62831b2ebc0SRitesh Harjani (IBM) return ret;
62931b2ebc0SRitesh Harjani (IBM) }
63031b2ebc0SRitesh Harjani (IBM) EXPORT_SYMBOL(generic_buffers_fsync_noflush);
63131b2ebc0SRitesh Harjani (IBM)
63231b2ebc0SRitesh Harjani (IBM) /**
63331b2ebc0SRitesh Harjani (IBM) * generic_buffers_fsync - generic buffer fsync implementation
63431b2ebc0SRitesh Harjani (IBM) * for simple filesystems with no inode lock
63531b2ebc0SRitesh Harjani (IBM) *
63631b2ebc0SRitesh Harjani (IBM) * @file: file to synchronize
63731b2ebc0SRitesh Harjani (IBM) * @start: start offset in bytes
63831b2ebc0SRitesh Harjani (IBM) * @end: end offset in bytes (inclusive)
63931b2ebc0SRitesh Harjani (IBM) * @datasync: only synchronize essential metadata if true
64031b2ebc0SRitesh Harjani (IBM) *
64131b2ebc0SRitesh Harjani (IBM) * This is a generic implementation of the fsync method for simple
64231b2ebc0SRitesh Harjani (IBM) * filesystems which track all non-inode metadata in the buffers list
64331b2ebc0SRitesh Harjani (IBM) * hanging off the address_space structure. This also makes sure that
64431b2ebc0SRitesh Harjani (IBM) * a device cache flush operation is called at the end.
64531b2ebc0SRitesh Harjani (IBM) */
generic_buffers_fsync(struct file * file,loff_t start,loff_t end,bool datasync)64631b2ebc0SRitesh Harjani (IBM) int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
64731b2ebc0SRitesh Harjani (IBM) bool datasync)
64831b2ebc0SRitesh Harjani (IBM) {
64931b2ebc0SRitesh Harjani (IBM) struct inode *inode = file->f_mapping->host;
65031b2ebc0SRitesh Harjani (IBM) int ret;
65131b2ebc0SRitesh Harjani (IBM)
65231b2ebc0SRitesh Harjani (IBM) ret = generic_buffers_fsync_noflush(file, start, end, datasync);
65331b2ebc0SRitesh Harjani (IBM) if (!ret)
65431b2ebc0SRitesh Harjani (IBM) ret = blkdev_issue_flush(inode->i_sb->s_bdev);
65531b2ebc0SRitesh Harjani (IBM) return ret;
65631b2ebc0SRitesh Harjani (IBM) }
65731b2ebc0SRitesh Harjani (IBM) EXPORT_SYMBOL(generic_buffers_fsync);
65831b2ebc0SRitesh Harjani (IBM)
6591da177e4SLinus Torvalds /*
6601da177e4SLinus Torvalds * Called when we've recently written block `bblock', and it is known that
6611da177e4SLinus Torvalds * `bblock' was for a buffer_boundary() buffer. This means that the block at
6621da177e4SLinus Torvalds * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
6631da177e4SLinus Torvalds * dirty, schedule it for IO. So that indirects merge nicely with their data.
6641da177e4SLinus Torvalds */
write_boundary_block(struct block_device * bdev,sector_t bblock,unsigned blocksize)6651da177e4SLinus Torvalds void write_boundary_block(struct block_device *bdev,
6661da177e4SLinus Torvalds sector_t bblock, unsigned blocksize)
6671da177e4SLinus Torvalds {
6681da177e4SLinus Torvalds struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
6691da177e4SLinus Torvalds if (bh) {
6701da177e4SLinus Torvalds if (buffer_dirty(bh))
671e7ea1129SZhang Yi write_dirty_buffer(bh, 0);
6721da177e4SLinus Torvalds put_bh(bh);
6731da177e4SLinus Torvalds }
6741da177e4SLinus Torvalds }
6751da177e4SLinus Torvalds
mark_buffer_dirty_inode(struct buffer_head * bh,struct inode * inode)6761da177e4SLinus Torvalds void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
6771da177e4SLinus Torvalds {
6781da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping;
679abc8a8a2SMatthew Wilcox (Oracle) struct address_space *buffer_mapping = bh->b_folio->mapping;
6801da177e4SLinus Torvalds
6811da177e4SLinus Torvalds mark_buffer_dirty(bh);
682252aa6f5SRafael Aquini if (!mapping->private_data) {
683252aa6f5SRafael Aquini mapping->private_data = buffer_mapping;
6841da177e4SLinus Torvalds } else {
685252aa6f5SRafael Aquini BUG_ON(mapping->private_data != buffer_mapping);
6861da177e4SLinus Torvalds }
687535ee2fbSJan Kara if (!bh->b_assoc_map) {
6881da177e4SLinus Torvalds spin_lock(&buffer_mapping->private_lock);
6891da177e4SLinus Torvalds list_move_tail(&bh->b_assoc_buffers,
6901da177e4SLinus Torvalds &mapping->private_list);
69158ff407bSJan Kara bh->b_assoc_map = mapping;
6921da177e4SLinus Torvalds spin_unlock(&buffer_mapping->private_lock);
6931da177e4SLinus Torvalds }
6941da177e4SLinus Torvalds }
6951da177e4SLinus Torvalds EXPORT_SYMBOL(mark_buffer_dirty_inode);
6961da177e4SLinus Torvalds
6971da177e4SLinus Torvalds /*
6981da177e4SLinus Torvalds * Add a page to the dirty page list.
6991da177e4SLinus Torvalds *
7001da177e4SLinus Torvalds * It is a sad fact of life that this function is called from several places
7011da177e4SLinus Torvalds * deeply under spinlocking. It may not sleep.
7021da177e4SLinus Torvalds *
7031da177e4SLinus Torvalds * If the page has buffers, the uptodate buffers are set dirty, to preserve
7041da177e4SLinus Torvalds * dirty-state coherency between the page and the buffers. It the page does
7051da177e4SLinus Torvalds * not have buffers then when they are later attached they will all be set
7061da177e4SLinus Torvalds * dirty.
7071da177e4SLinus Torvalds *
7081da177e4SLinus Torvalds * The buffers are dirtied before the page is dirtied. There's a small race
7091da177e4SLinus Torvalds * window in which a writepage caller may see the page cleanness but not the
7101da177e4SLinus Torvalds * buffer dirtiness. That's fine. If this code were to set the page dirty
7111da177e4SLinus Torvalds * before the buffers, a concurrent writepage caller could clear the page dirty
7121da177e4SLinus Torvalds * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
7131da177e4SLinus Torvalds * page on the dirty page list.
7141da177e4SLinus Torvalds *
7151da177e4SLinus Torvalds * We use private_lock to lock against try_to_free_buffers while using the
7161da177e4SLinus Torvalds * page's buffer list. Also use this to protect against clean buffers being
7171da177e4SLinus Torvalds * added to the page after it was set dirty.
7181da177e4SLinus Torvalds *
7191da177e4SLinus Torvalds * FIXME: may need to call ->reservepage here as well. That's rather up to the
7201da177e4SLinus Torvalds * address_space though.
7211da177e4SLinus Torvalds */
block_dirty_folio(struct address_space * mapping,struct folio * folio)722e621900aSMatthew Wilcox (Oracle) bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
7231da177e4SLinus Torvalds {
724e621900aSMatthew Wilcox (Oracle) struct buffer_head *head;
725e621900aSMatthew Wilcox (Oracle) bool newly_dirty;
7261da177e4SLinus Torvalds
7271da177e4SLinus Torvalds spin_lock(&mapping->private_lock);
728e621900aSMatthew Wilcox (Oracle) head = folio_buffers(folio);
729e621900aSMatthew Wilcox (Oracle) if (head) {
7301da177e4SLinus Torvalds struct buffer_head *bh = head;
7311da177e4SLinus Torvalds
7321da177e4SLinus Torvalds do {
7331da177e4SLinus Torvalds set_buffer_dirty(bh);
7341da177e4SLinus Torvalds bh = bh->b_this_page;
7351da177e4SLinus Torvalds } while (bh != head);
7361da177e4SLinus Torvalds }
737c4843a75SGreg Thelen /*
738bcfe06bfSRoman Gushchin * Lock out page's memcg migration to keep PageDirty
73981f8c3a4SJohannes Weiner * synchronized with per-memcg dirty page counters.
740c4843a75SGreg Thelen */
741e621900aSMatthew Wilcox (Oracle) folio_memcg_lock(folio);
742e621900aSMatthew Wilcox (Oracle) newly_dirty = !folio_test_set_dirty(folio);
7431da177e4SLinus Torvalds spin_unlock(&mapping->private_lock);
7441da177e4SLinus Torvalds
745a8e7d49aSLinus Torvalds if (newly_dirty)
746e621900aSMatthew Wilcox (Oracle) __folio_mark_dirty(folio, mapping, 1);
747c4843a75SGreg Thelen
748e621900aSMatthew Wilcox (Oracle) folio_memcg_unlock(folio);
749c4843a75SGreg Thelen
750c4843a75SGreg Thelen if (newly_dirty)
751c4843a75SGreg Thelen __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
752c4843a75SGreg Thelen
753a8e7d49aSLinus Torvalds return newly_dirty;
7541da177e4SLinus Torvalds }
755e621900aSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_dirty_folio);
7561da177e4SLinus Torvalds
7571da177e4SLinus Torvalds /*
7581da177e4SLinus Torvalds * Write out and wait upon a list of buffers.
7591da177e4SLinus Torvalds *
7601da177e4SLinus Torvalds * We have conflicting pressures: we want to make sure that all
7611da177e4SLinus Torvalds * initially dirty buffers get waited on, but that any subsequently
7621da177e4SLinus Torvalds * dirtied buffers don't. After all, we don't want fsync to last
7631da177e4SLinus Torvalds * forever if somebody is actively writing to the file.
7641da177e4SLinus Torvalds *
7651da177e4SLinus Torvalds * Do this in two main stages: first we copy dirty buffers to a
7661da177e4SLinus Torvalds * temporary inode list, queueing the writes as we go. Then we clean
7671da177e4SLinus Torvalds * up, waiting for those writes to complete.
7681da177e4SLinus Torvalds *
7691da177e4SLinus Torvalds * During this second stage, any subsequent updates to the file may end
7701da177e4SLinus Torvalds * up refiling the buffer on the original inode's dirty list again, so
7711da177e4SLinus Torvalds * there is a chance we will end up with a buffer queued for write but
7721da177e4SLinus Torvalds * not yet completed on that list. So, as a final cleanup we go through
7731da177e4SLinus Torvalds * the osync code to catch these locked, dirty buffers without requeuing
7741da177e4SLinus Torvalds * any newly dirty buffers for write.
7751da177e4SLinus Torvalds */
fsync_buffers_list(spinlock_t * lock,struct list_head * list)7761da177e4SLinus Torvalds static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
7771da177e4SLinus Torvalds {
7781da177e4SLinus Torvalds struct buffer_head *bh;
7791da177e4SLinus Torvalds struct list_head tmp;
7807eaceaccSJens Axboe struct address_space *mapping;
7811da177e4SLinus Torvalds int err = 0, err2;
7824ee2491eSJens Axboe struct blk_plug plug;
7831da177e4SLinus Torvalds
7841da177e4SLinus Torvalds INIT_LIST_HEAD(&tmp);
7854ee2491eSJens Axboe blk_start_plug(&plug);
7861da177e4SLinus Torvalds
7871da177e4SLinus Torvalds spin_lock(lock);
7881da177e4SLinus Torvalds while (!list_empty(list)) {
7891da177e4SLinus Torvalds bh = BH_ENTRY(list->next);
790535ee2fbSJan Kara mapping = bh->b_assoc_map;
79158ff407bSJan Kara __remove_assoc_queue(bh);
792535ee2fbSJan Kara /* Avoid race with mark_buffer_dirty_inode() which does
793535ee2fbSJan Kara * a lockless check and we rely on seeing the dirty bit */
794535ee2fbSJan Kara smp_mb();
7951da177e4SLinus Torvalds if (buffer_dirty(bh) || buffer_locked(bh)) {
7961da177e4SLinus Torvalds list_add(&bh->b_assoc_buffers, &tmp);
797535ee2fbSJan Kara bh->b_assoc_map = mapping;
7981da177e4SLinus Torvalds if (buffer_dirty(bh)) {
7991da177e4SLinus Torvalds get_bh(bh);
8001da177e4SLinus Torvalds spin_unlock(lock);
8011da177e4SLinus Torvalds /*
8021da177e4SLinus Torvalds * Ensure any pending I/O completes so that
8039cb569d6SChristoph Hellwig * write_dirty_buffer() actually writes the
8049cb569d6SChristoph Hellwig * current contents - it is a noop if I/O is
8059cb569d6SChristoph Hellwig * still in flight on potentially older
8069cb569d6SChristoph Hellwig * contents.
8071da177e4SLinus Torvalds */
80870fd7614SChristoph Hellwig write_dirty_buffer(bh, REQ_SYNC);
8099cf6b720SJens Axboe
8109cf6b720SJens Axboe /*
8119cf6b720SJens Axboe * Kick off IO for the previous mapping. Note
8129cf6b720SJens Axboe * that we will not run the very last mapping,
8139cf6b720SJens Axboe * wait_on_buffer() will do that for us
8149cf6b720SJens Axboe * through sync_buffer().
8159cf6b720SJens Axboe */
8161da177e4SLinus Torvalds brelse(bh);
8171da177e4SLinus Torvalds spin_lock(lock);
8181da177e4SLinus Torvalds }
8191da177e4SLinus Torvalds }
8201da177e4SLinus Torvalds }
8211da177e4SLinus Torvalds
8224ee2491eSJens Axboe spin_unlock(lock);
8234ee2491eSJens Axboe blk_finish_plug(&plug);
8244ee2491eSJens Axboe spin_lock(lock);
8254ee2491eSJens Axboe
8261da177e4SLinus Torvalds while (!list_empty(&tmp)) {
8271da177e4SLinus Torvalds bh = BH_ENTRY(tmp.prev);
8281da177e4SLinus Torvalds get_bh(bh);
829535ee2fbSJan Kara mapping = bh->b_assoc_map;
830535ee2fbSJan Kara __remove_assoc_queue(bh);
831535ee2fbSJan Kara /* Avoid race with mark_buffer_dirty_inode() which does
832535ee2fbSJan Kara * a lockless check and we rely on seeing the dirty bit */
833535ee2fbSJan Kara smp_mb();
834535ee2fbSJan Kara if (buffer_dirty(bh)) {
835535ee2fbSJan Kara list_add(&bh->b_assoc_buffers,
836e3892296SJan Kara &mapping->private_list);
837535ee2fbSJan Kara bh->b_assoc_map = mapping;
838535ee2fbSJan Kara }
8391da177e4SLinus Torvalds spin_unlock(lock);
8401da177e4SLinus Torvalds wait_on_buffer(bh);
8411da177e4SLinus Torvalds if (!buffer_uptodate(bh))
8421da177e4SLinus Torvalds err = -EIO;
8431da177e4SLinus Torvalds brelse(bh);
8441da177e4SLinus Torvalds spin_lock(lock);
8451da177e4SLinus Torvalds }
8461da177e4SLinus Torvalds
8471da177e4SLinus Torvalds spin_unlock(lock);
8481da177e4SLinus Torvalds err2 = osync_buffers_list(lock, list);
8491da177e4SLinus Torvalds if (err)
8501da177e4SLinus Torvalds return err;
8511da177e4SLinus Torvalds else
8521da177e4SLinus Torvalds return err2;
8531da177e4SLinus Torvalds }
8541da177e4SLinus Torvalds
8551da177e4SLinus Torvalds /*
8561da177e4SLinus Torvalds * Invalidate any and all dirty buffers on a given inode. We are
8571da177e4SLinus Torvalds * probably unmounting the fs, but that doesn't mean we have already
8581da177e4SLinus Torvalds * done a sync(). Just drop the buffers from the inode list.
8591da177e4SLinus Torvalds *
8601da177e4SLinus Torvalds * NOTE: we take the inode's blockdev's mapping's private_lock. Which
8611da177e4SLinus Torvalds * assumes that all the buffers are against the blockdev. Not true
8621da177e4SLinus Torvalds * for reiserfs.
8631da177e4SLinus Torvalds */
invalidate_inode_buffers(struct inode * inode)8641da177e4SLinus Torvalds void invalidate_inode_buffers(struct inode *inode)
8651da177e4SLinus Torvalds {
8661da177e4SLinus Torvalds if (inode_has_buffers(inode)) {
8671da177e4SLinus Torvalds struct address_space *mapping = &inode->i_data;
8681da177e4SLinus Torvalds struct list_head *list = &mapping->private_list;
869252aa6f5SRafael Aquini struct address_space *buffer_mapping = mapping->private_data;
8701da177e4SLinus Torvalds
8711da177e4SLinus Torvalds spin_lock(&buffer_mapping->private_lock);
8721da177e4SLinus Torvalds while (!list_empty(list))
8731da177e4SLinus Torvalds __remove_assoc_queue(BH_ENTRY(list->next));
8741da177e4SLinus Torvalds spin_unlock(&buffer_mapping->private_lock);
8751da177e4SLinus Torvalds }
8761da177e4SLinus Torvalds }
87752b19ac9SJan Kara EXPORT_SYMBOL(invalidate_inode_buffers);
8781da177e4SLinus Torvalds
8791da177e4SLinus Torvalds /*
8801da177e4SLinus Torvalds * Remove any clean buffers from the inode's buffer list. This is called
8811da177e4SLinus Torvalds * when we're trying to free the inode itself. Those buffers can pin it.
8821da177e4SLinus Torvalds *
8831da177e4SLinus Torvalds * Returns true if all buffers were removed.
8841da177e4SLinus Torvalds */
remove_inode_buffers(struct inode * inode)8851da177e4SLinus Torvalds int remove_inode_buffers(struct inode *inode)
8861da177e4SLinus Torvalds {
8871da177e4SLinus Torvalds int ret = 1;
8881da177e4SLinus Torvalds
8891da177e4SLinus Torvalds if (inode_has_buffers(inode)) {
8901da177e4SLinus Torvalds struct address_space *mapping = &inode->i_data;
8911da177e4SLinus Torvalds struct list_head *list = &mapping->private_list;
892252aa6f5SRafael Aquini struct address_space *buffer_mapping = mapping->private_data;
8931da177e4SLinus Torvalds
8941da177e4SLinus Torvalds spin_lock(&buffer_mapping->private_lock);
8951da177e4SLinus Torvalds while (!list_empty(list)) {
8961da177e4SLinus Torvalds struct buffer_head *bh = BH_ENTRY(list->next);
8971da177e4SLinus Torvalds if (buffer_dirty(bh)) {
8981da177e4SLinus Torvalds ret = 0;
8991da177e4SLinus Torvalds break;
9001da177e4SLinus Torvalds }
9011da177e4SLinus Torvalds __remove_assoc_queue(bh);
9021da177e4SLinus Torvalds }
9031da177e4SLinus Torvalds spin_unlock(&buffer_mapping->private_lock);
9041da177e4SLinus Torvalds }
9051da177e4SLinus Torvalds return ret;
9061da177e4SLinus Torvalds }
9071da177e4SLinus Torvalds
9081da177e4SLinus Torvalds /*
909c71124a8SPankaj Raghav * Create the appropriate buffers when given a folio for data area and
9101da177e4SLinus Torvalds * the size of each buffer.. Use the bh->b_this_page linked list to
9111da177e4SLinus Torvalds * follow the buffers created. Return NULL if unable to create more
9121da177e4SLinus Torvalds * buffers.
9131da177e4SLinus Torvalds *
9141da177e4SLinus Torvalds * The retry flag is used to differentiate async IO (paging, swapping)
9151da177e4SLinus Torvalds * which may not fail from ordinary buffer allocations.
9161da177e4SLinus Torvalds */
folio_alloc_buffers(struct folio * folio,unsigned long size,bool retry)917c71124a8SPankaj Raghav struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
918640ab98fSJens Axboe bool retry)
9191da177e4SLinus Torvalds {
9201da177e4SLinus Torvalds struct buffer_head *bh, *head;
921f745c6f5SShakeel Butt gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
9221da177e4SLinus Torvalds long offset;
923b87d8cefSRoman Gushchin struct mem_cgroup *memcg, *old_memcg;
9241da177e4SLinus Torvalds
925640ab98fSJens Axboe if (retry)
926640ab98fSJens Axboe gfp |= __GFP_NOFAIL;
927640ab98fSJens Axboe
928c71124a8SPankaj Raghav /* The folio lock pins the memcg */
929c71124a8SPankaj Raghav memcg = folio_memcg(folio);
930b87d8cefSRoman Gushchin old_memcg = set_active_memcg(memcg);
931f745c6f5SShakeel Butt
9321da177e4SLinus Torvalds head = NULL;
933c71124a8SPankaj Raghav offset = folio_size(folio);
9341da177e4SLinus Torvalds while ((offset -= size) >= 0) {
935640ab98fSJens Axboe bh = alloc_buffer_head(gfp);
9361da177e4SLinus Torvalds if (!bh)
9371da177e4SLinus Torvalds goto no_grow;
9381da177e4SLinus Torvalds
9391da177e4SLinus Torvalds bh->b_this_page = head;
9401da177e4SLinus Torvalds bh->b_blocknr = -1;
9411da177e4SLinus Torvalds head = bh;
9421da177e4SLinus Torvalds
9431da177e4SLinus Torvalds bh->b_size = size;
9441da177e4SLinus Torvalds
945c71124a8SPankaj Raghav /* Link the buffer to its folio */
946c71124a8SPankaj Raghav folio_set_bh(bh, folio, offset);
9471da177e4SLinus Torvalds }
948f745c6f5SShakeel Butt out:
949b87d8cefSRoman Gushchin set_active_memcg(old_memcg);
9501da177e4SLinus Torvalds return head;
9511da177e4SLinus Torvalds /*
9521da177e4SLinus Torvalds * In case anything failed, we just free everything we got.
9531da177e4SLinus Torvalds */
9541da177e4SLinus Torvalds no_grow:
9551da177e4SLinus Torvalds if (head) {
9561da177e4SLinus Torvalds do {
9571da177e4SLinus Torvalds bh = head;
9581da177e4SLinus Torvalds head = head->b_this_page;
9591da177e4SLinus Torvalds free_buffer_head(bh);
9601da177e4SLinus Torvalds } while (head);
9611da177e4SLinus Torvalds }
9621da177e4SLinus Torvalds
963f745c6f5SShakeel Butt goto out;
9641da177e4SLinus Torvalds }
965c71124a8SPankaj Raghav EXPORT_SYMBOL_GPL(folio_alloc_buffers);
966c71124a8SPankaj Raghav
alloc_page_buffers(struct page * page,unsigned long size,bool retry)967c71124a8SPankaj Raghav struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
968c71124a8SPankaj Raghav bool retry)
969c71124a8SPankaj Raghav {
970c71124a8SPankaj Raghav return folio_alloc_buffers(page_folio(page), size, retry);
971c71124a8SPankaj Raghav }
9721da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(alloc_page_buffers);
9731da177e4SLinus Torvalds
link_dev_buffers(struct folio * folio,struct buffer_head * head)97408d84addSMatthew Wilcox (Oracle) static inline void link_dev_buffers(struct folio *folio,
97508d84addSMatthew Wilcox (Oracle) struct buffer_head *head)
9761da177e4SLinus Torvalds {
9771da177e4SLinus Torvalds struct buffer_head *bh, *tail;
9781da177e4SLinus Torvalds
9791da177e4SLinus Torvalds bh = head;
9801da177e4SLinus Torvalds do {
9811da177e4SLinus Torvalds tail = bh;
9821da177e4SLinus Torvalds bh = bh->b_this_page;
9831da177e4SLinus Torvalds } while (bh);
9841da177e4SLinus Torvalds tail->b_this_page = head;
98508d84addSMatthew Wilcox (Oracle) folio_attach_private(folio, head);
9861da177e4SLinus Torvalds }
9871da177e4SLinus Torvalds
blkdev_max_block(struct block_device * bdev,unsigned int size)988bbec0270SLinus Torvalds static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
989bbec0270SLinus Torvalds {
990bbec0270SLinus Torvalds sector_t retval = ~((sector_t)0);
991b86058f9SChristoph Hellwig loff_t sz = bdev_nr_bytes(bdev);
992bbec0270SLinus Torvalds
993bbec0270SLinus Torvalds if (sz) {
994bbec0270SLinus Torvalds unsigned int sizebits = blksize_bits(size);
995bbec0270SLinus Torvalds retval = (sz >> sizebits);
996bbec0270SLinus Torvalds }
997bbec0270SLinus Torvalds return retval;
998bbec0270SLinus Torvalds }
999bbec0270SLinus Torvalds
10001da177e4SLinus Torvalds /*
10016f24ce6bSMatthew Wilcox (Oracle) * Initialise the state of a blockdev folio's buffers.
10021da177e4SLinus Torvalds */
folio_init_buffers(struct folio * folio,struct block_device * bdev,sector_t block,int size)10036f24ce6bSMatthew Wilcox (Oracle) static sector_t folio_init_buffers(struct folio *folio,
10046f24ce6bSMatthew Wilcox (Oracle) struct block_device *bdev, sector_t block, int size)
10051da177e4SLinus Torvalds {
10066f24ce6bSMatthew Wilcox (Oracle) struct buffer_head *head = folio_buffers(folio);
10071da177e4SLinus Torvalds struct buffer_head *bh = head;
10086f24ce6bSMatthew Wilcox (Oracle) bool uptodate = folio_test_uptodate(folio);
1009bcd1d063SChristoph Hellwig sector_t end_block = blkdev_max_block(bdev, size);
10101da177e4SLinus Torvalds
10111da177e4SLinus Torvalds do {
10121da177e4SLinus Torvalds if (!buffer_mapped(bh)) {
101301950a34SEric Biggers bh->b_end_io = NULL;
101401950a34SEric Biggers bh->b_private = NULL;
10151da177e4SLinus Torvalds bh->b_bdev = bdev;
10161da177e4SLinus Torvalds bh->b_blocknr = block;
10171da177e4SLinus Torvalds if (uptodate)
10181da177e4SLinus Torvalds set_buffer_uptodate(bh);
1019080399aaSJeff Moyer if (block < end_block)
10201da177e4SLinus Torvalds set_buffer_mapped(bh);
10211da177e4SLinus Torvalds }
10221da177e4SLinus Torvalds block++;
10231da177e4SLinus Torvalds bh = bh->b_this_page;
10241da177e4SLinus Torvalds } while (bh != head);
1025676ce6d5SHugh Dickins
1026676ce6d5SHugh Dickins /*
1027676ce6d5SHugh Dickins * Caller needs to validate requested block against end of device.
1028676ce6d5SHugh Dickins */
1029676ce6d5SHugh Dickins return end_block;
10301da177e4SLinus Torvalds }
10311da177e4SLinus Torvalds
10321da177e4SLinus Torvalds /*
10331da177e4SLinus Torvalds * Create the page-cache page that contains the requested block.
10341da177e4SLinus Torvalds *
1035676ce6d5SHugh Dickins * This is used purely for blockdev mappings.
10361da177e4SLinus Torvalds */
1037676ce6d5SHugh Dickins static int
grow_dev_page(struct block_device * bdev,sector_t block,pgoff_t index,int size,int sizebits,gfp_t gfp)10381da177e4SLinus Torvalds grow_dev_page(struct block_device *bdev, sector_t block,
10393b5e6454SGioh Kim pgoff_t index, int size, int sizebits, gfp_t gfp)
10401da177e4SLinus Torvalds {
10411da177e4SLinus Torvalds struct inode *inode = bdev->bd_inode;
10423c98a41cSMatthew Wilcox (Oracle) struct folio *folio;
10431da177e4SLinus Torvalds struct buffer_head *bh;
1044676ce6d5SHugh Dickins sector_t end_block;
1045c4b4c2a7SZhiqiang Liu int ret = 0;
104684235de3SJohannes Weiner gfp_t gfp_mask;
10471da177e4SLinus Torvalds
1048c62d2555SMichal Hocko gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
10493b5e6454SGioh Kim
105084235de3SJohannes Weiner /*
105184235de3SJohannes Weiner * XXX: __getblk_slow() can not really deal with failure and
105284235de3SJohannes Weiner * will endlessly loop on improvised global reclaim. Prefer
105384235de3SJohannes Weiner * looping in the allocator rather than here, at least that
105484235de3SJohannes Weiner * code knows what it's doing.
105584235de3SJohannes Weiner */
105684235de3SJohannes Weiner gfp_mask |= __GFP_NOFAIL;
105784235de3SJohannes Weiner
10583c98a41cSMatthew Wilcox (Oracle) folio = __filemap_get_folio(inode->i_mapping, index,
10593c98a41cSMatthew Wilcox (Oracle) FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp_mask);
10601da177e4SLinus Torvalds
10613c98a41cSMatthew Wilcox (Oracle) bh = folio_buffers(folio);
10623c98a41cSMatthew Wilcox (Oracle) if (bh) {
10631da177e4SLinus Torvalds if (bh->b_size == size) {
10646f24ce6bSMatthew Wilcox (Oracle) end_block = folio_init_buffers(folio, bdev,
10656f24ce6bSMatthew Wilcox (Oracle) (sector_t)index << sizebits, size);
1066676ce6d5SHugh Dickins goto done;
10671da177e4SLinus Torvalds }
10683c98a41cSMatthew Wilcox (Oracle) if (!try_to_free_buffers(folio))
10691da177e4SLinus Torvalds goto failed;
10701da177e4SLinus Torvalds }
10711da177e4SLinus Torvalds
10723c98a41cSMatthew Wilcox (Oracle) bh = folio_alloc_buffers(folio, size, true);
10731da177e4SLinus Torvalds
10741da177e4SLinus Torvalds /*
10753c98a41cSMatthew Wilcox (Oracle) * Link the folio to the buffers and initialise them. Take the
10761da177e4SLinus Torvalds * lock to be atomic wrt __find_get_block(), which does not
10773c98a41cSMatthew Wilcox (Oracle) * run under the folio lock.
10781da177e4SLinus Torvalds */
10791da177e4SLinus Torvalds spin_lock(&inode->i_mapping->private_lock);
108008d84addSMatthew Wilcox (Oracle) link_dev_buffers(folio, bh);
10816f24ce6bSMatthew Wilcox (Oracle) end_block = folio_init_buffers(folio, bdev,
10823c98a41cSMatthew Wilcox (Oracle) (sector_t)index << sizebits, size);
10831da177e4SLinus Torvalds spin_unlock(&inode->i_mapping->private_lock);
1084676ce6d5SHugh Dickins done:
1085676ce6d5SHugh Dickins ret = (block < end_block) ? 1 : -ENXIO;
10861da177e4SLinus Torvalds failed:
10873c98a41cSMatthew Wilcox (Oracle) folio_unlock(folio);
10883c98a41cSMatthew Wilcox (Oracle) folio_put(folio);
1089676ce6d5SHugh Dickins return ret;
10901da177e4SLinus Torvalds }
10911da177e4SLinus Torvalds
10921da177e4SLinus Torvalds /*
10931da177e4SLinus Torvalds * Create buffers for the specified block device block's page. If
10941da177e4SLinus Torvalds * that page was dirty, the buffers are set dirty also.
10951da177e4SLinus Torvalds */
1096858119e1SArjan van de Ven static int
grow_buffers(struct block_device * bdev,sector_t block,int size,gfp_t gfp)10973b5e6454SGioh Kim grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
10981da177e4SLinus Torvalds {
10991da177e4SLinus Torvalds pgoff_t index;
11001da177e4SLinus Torvalds int sizebits;
11011da177e4SLinus Torvalds
110290432e60SMikulas Patocka sizebits = PAGE_SHIFT - __ffs(size);
11031da177e4SLinus Torvalds index = block >> sizebits;
11041da177e4SLinus Torvalds
1105e5657933SAndrew Morton /*
1106e5657933SAndrew Morton * Check for a block which wants to lie outside our maximum possible
1107e5657933SAndrew Morton * pagecache index. (this comparison is done using sector_t types).
1108e5657933SAndrew Morton */
1109e5657933SAndrew Morton if (unlikely(index != block >> sizebits)) {
1110e5657933SAndrew Morton printk(KERN_ERR "%s: requested out-of-range block %llu for "
1111a1c6f057SDmitry Monakhov "device %pg\n",
11128e24eea7SHarvey Harrison __func__, (unsigned long long)block,
1113a1c6f057SDmitry Monakhov bdev);
1114e5657933SAndrew Morton return -EIO;
1115e5657933SAndrew Morton }
1116676ce6d5SHugh Dickins
11171da177e4SLinus Torvalds /* Create a page with the proper size buffers.. */
11183b5e6454SGioh Kim return grow_dev_page(bdev, block, index, size, sizebits, gfp);
11191da177e4SLinus Torvalds }
11201da177e4SLinus Torvalds
11210026ba40SEric Biggers static struct buffer_head *
__getblk_slow(struct block_device * bdev,sector_t block,unsigned size,gfp_t gfp)11223b5e6454SGioh Kim __getblk_slow(struct block_device *bdev, sector_t block,
11233b5e6454SGioh Kim unsigned size, gfp_t gfp)
11241da177e4SLinus Torvalds {
11251da177e4SLinus Torvalds /* Size must be multiple of hard sectorsize */
1126e1defc4fSMartin K. Petersen if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
11271da177e4SLinus Torvalds (size < 512 || size > PAGE_SIZE))) {
11281da177e4SLinus Torvalds printk(KERN_ERR "getblk(): invalid block size %d requested\n",
11291da177e4SLinus Torvalds size);
1130e1defc4fSMartin K. Petersen printk(KERN_ERR "logical block size: %d\n",
1131e1defc4fSMartin K. Petersen bdev_logical_block_size(bdev));
11321da177e4SLinus Torvalds
11331da177e4SLinus Torvalds dump_stack();
11341da177e4SLinus Torvalds return NULL;
11351da177e4SLinus Torvalds }
11361da177e4SLinus Torvalds
1137676ce6d5SHugh Dickins for (;;) {
1138676ce6d5SHugh Dickins struct buffer_head *bh;
1139676ce6d5SHugh Dickins int ret;
1140676ce6d5SHugh Dickins
11411da177e4SLinus Torvalds bh = __find_get_block(bdev, block, size);
11421da177e4SLinus Torvalds if (bh)
11431da177e4SLinus Torvalds return bh;
11441da177e4SLinus Torvalds
11453b5e6454SGioh Kim ret = grow_buffers(bdev, block, size, gfp);
1146676ce6d5SHugh Dickins if (ret < 0)
114791f68c89SJeff Moyer return NULL;
1148676ce6d5SHugh Dickins }
11491da177e4SLinus Torvalds }
11501da177e4SLinus Torvalds
11511da177e4SLinus Torvalds /*
11521da177e4SLinus Torvalds * The relationship between dirty buffers and dirty pages:
11531da177e4SLinus Torvalds *
11541da177e4SLinus Torvalds * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1155ec82e1c1SMatthew Wilcox * the page is tagged dirty in the page cache.
11561da177e4SLinus Torvalds *
11571da177e4SLinus Torvalds * At all times, the dirtiness of the buffers represents the dirtiness of
11581da177e4SLinus Torvalds * subsections of the page. If the page has buffers, the page dirty bit is
11591da177e4SLinus Torvalds * merely a hint about the true dirty state.
11601da177e4SLinus Torvalds *
11611da177e4SLinus Torvalds * When a page is set dirty in its entirety, all its buffers are marked dirty
11621da177e4SLinus Torvalds * (if the page has buffers).
11631da177e4SLinus Torvalds *
11641da177e4SLinus Torvalds * When a buffer is marked dirty, its page is dirtied, but the page's other
11651da177e4SLinus Torvalds * buffers are not.
11661da177e4SLinus Torvalds *
11671da177e4SLinus Torvalds * Also. When blockdev buffers are explicitly read with bread(), they
11681da177e4SLinus Torvalds * individually become uptodate. But their backing page remains not
11691da177e4SLinus Torvalds * uptodate - even if all of its buffers are uptodate. A subsequent
11702c69e205SMatthew Wilcox (Oracle) * block_read_full_folio() against that folio will discover all the uptodate
11712c69e205SMatthew Wilcox (Oracle) * buffers, will set the folio uptodate and will perform no I/O.
11721da177e4SLinus Torvalds */
11731da177e4SLinus Torvalds
11741da177e4SLinus Torvalds /**
11751da177e4SLinus Torvalds * mark_buffer_dirty - mark a buffer_head as needing writeout
117667be2dd1SMartin Waitz * @bh: the buffer_head to mark dirty
11771da177e4SLinus Torvalds *
1178ec82e1c1SMatthew Wilcox * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1179ec82e1c1SMatthew Wilcox * its backing page dirty, then tag the page as dirty in the page cache
1180ec82e1c1SMatthew Wilcox * and then attach the address_space's inode to its superblock's dirty
11811da177e4SLinus Torvalds * inode list.
11821da177e4SLinus Torvalds *
1183abc8a8a2SMatthew Wilcox (Oracle) * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->private_lock,
1184b93b0163SMatthew Wilcox * i_pages lock and mapping->host->i_lock.
11851da177e4SLinus Torvalds */
mark_buffer_dirty(struct buffer_head * bh)1186fc9b52cdSHarvey Harrison void mark_buffer_dirty(struct buffer_head *bh)
11871da177e4SLinus Torvalds {
1188787d2214SNick Piggin WARN_ON_ONCE(!buffer_uptodate(bh));
11891be62dc1SLinus Torvalds
11905305cb83STejun Heo trace_block_dirty_buffer(bh);
11915305cb83STejun Heo
11921be62dc1SLinus Torvalds /*
11931be62dc1SLinus Torvalds * Very *carefully* optimize the it-is-already-dirty case.
11941be62dc1SLinus Torvalds *
11951be62dc1SLinus Torvalds * Don't let the final "is it dirty" escape to before we
11961be62dc1SLinus Torvalds * perhaps modified the buffer.
11971be62dc1SLinus Torvalds */
11981be62dc1SLinus Torvalds if (buffer_dirty(bh)) {
11991be62dc1SLinus Torvalds smp_mb();
12001be62dc1SLinus Torvalds if (buffer_dirty(bh))
12011be62dc1SLinus Torvalds return;
12021be62dc1SLinus Torvalds }
12031be62dc1SLinus Torvalds
1204a8e7d49aSLinus Torvalds if (!test_set_buffer_dirty(bh)) {
1205cf1d3417SMatthew Wilcox (Oracle) struct folio *folio = bh->b_folio;
1206c4843a75SGreg Thelen struct address_space *mapping = NULL;
1207c4843a75SGreg Thelen
1208cf1d3417SMatthew Wilcox (Oracle) folio_memcg_lock(folio);
1209cf1d3417SMatthew Wilcox (Oracle) if (!folio_test_set_dirty(folio)) {
1210cf1d3417SMatthew Wilcox (Oracle) mapping = folio->mapping;
12118e9d78edSLinus Torvalds if (mapping)
1212cf1d3417SMatthew Wilcox (Oracle) __folio_mark_dirty(folio, mapping, 0);
12138e9d78edSLinus Torvalds }
1214cf1d3417SMatthew Wilcox (Oracle) folio_memcg_unlock(folio);
1215c4843a75SGreg Thelen if (mapping)
1216c4843a75SGreg Thelen __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1217a8e7d49aSLinus Torvalds }
12181da177e4SLinus Torvalds }
12191fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(mark_buffer_dirty);
12201da177e4SLinus Torvalds
mark_buffer_write_io_error(struct buffer_head * bh)122187354e5dSJeff Layton void mark_buffer_write_io_error(struct buffer_head *bh)
122287354e5dSJeff Layton {
122387354e5dSJeff Layton set_buffer_write_io_error(bh);
122487354e5dSJeff Layton /* FIXME: do we need to set this in both places? */
1225abc8a8a2SMatthew Wilcox (Oracle) if (bh->b_folio && bh->b_folio->mapping)
1226abc8a8a2SMatthew Wilcox (Oracle) mapping_set_error(bh->b_folio->mapping, -EIO);
12274b2201daSChristoph Hellwig if (bh->b_assoc_map) {
122887354e5dSJeff Layton mapping_set_error(bh->b_assoc_map, -EIO);
12294b2201daSChristoph Hellwig errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
12304b2201daSChristoph Hellwig }
123187354e5dSJeff Layton }
123287354e5dSJeff Layton EXPORT_SYMBOL(mark_buffer_write_io_error);
123387354e5dSJeff Layton
12341da177e4SLinus Torvalds /*
12351da177e4SLinus Torvalds * Decrement a buffer_head's reference count. If all buffers against a page
12361da177e4SLinus Torvalds * have zero reference count, are clean and unlocked, and if the page is clean
12371da177e4SLinus Torvalds * and unlocked then try_to_free_buffers() may strip the buffers from the page
12381da177e4SLinus Torvalds * in preparation for freeing it (sometimes, rarely, buffers are removed from
12391da177e4SLinus Torvalds * a page but it ends up not being freed, and buffers may later be reattached).
12401da177e4SLinus Torvalds */
__brelse(struct buffer_head * buf)12411da177e4SLinus Torvalds void __brelse(struct buffer_head * buf)
12421da177e4SLinus Torvalds {
12431da177e4SLinus Torvalds if (atomic_read(&buf->b_count)) {
12441da177e4SLinus Torvalds put_bh(buf);
12451da177e4SLinus Torvalds return;
12461da177e4SLinus Torvalds }
12475c752ad9SArjan van de Ven WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
12481da177e4SLinus Torvalds }
12491fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__brelse);
12501da177e4SLinus Torvalds
12511da177e4SLinus Torvalds /*
12521da177e4SLinus Torvalds * bforget() is like brelse(), except it discards any
12531da177e4SLinus Torvalds * potentially dirty data.
12541da177e4SLinus Torvalds */
__bforget(struct buffer_head * bh)12551da177e4SLinus Torvalds void __bforget(struct buffer_head *bh)
12561da177e4SLinus Torvalds {
12571da177e4SLinus Torvalds clear_buffer_dirty(bh);
1258535ee2fbSJan Kara if (bh->b_assoc_map) {
1259abc8a8a2SMatthew Wilcox (Oracle) struct address_space *buffer_mapping = bh->b_folio->mapping;
12601da177e4SLinus Torvalds
12611da177e4SLinus Torvalds spin_lock(&buffer_mapping->private_lock);
12621da177e4SLinus Torvalds list_del_init(&bh->b_assoc_buffers);
126358ff407bSJan Kara bh->b_assoc_map = NULL;
12641da177e4SLinus Torvalds spin_unlock(&buffer_mapping->private_lock);
12651da177e4SLinus Torvalds }
12661da177e4SLinus Torvalds __brelse(bh);
12671da177e4SLinus Torvalds }
12681fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(__bforget);
12691da177e4SLinus Torvalds
__bread_slow(struct buffer_head * bh)12701da177e4SLinus Torvalds static struct buffer_head *__bread_slow(struct buffer_head *bh)
12711da177e4SLinus Torvalds {
12721da177e4SLinus Torvalds lock_buffer(bh);
12731da177e4SLinus Torvalds if (buffer_uptodate(bh)) {
12741da177e4SLinus Torvalds unlock_buffer(bh);
12751da177e4SLinus Torvalds return bh;
12761da177e4SLinus Torvalds } else {
12771da177e4SLinus Torvalds get_bh(bh);
12781da177e4SLinus Torvalds bh->b_end_io = end_buffer_read_sync;
12791420c4a5SBart Van Assche submit_bh(REQ_OP_READ, bh);
12801da177e4SLinus Torvalds wait_on_buffer(bh);
12811da177e4SLinus Torvalds if (buffer_uptodate(bh))
12821da177e4SLinus Torvalds return bh;
12831da177e4SLinus Torvalds }
12841da177e4SLinus Torvalds brelse(bh);
12851da177e4SLinus Torvalds return NULL;
12861da177e4SLinus Torvalds }
12871da177e4SLinus Torvalds
12881da177e4SLinus Torvalds /*
12891da177e4SLinus Torvalds * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
12901da177e4SLinus Torvalds * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
12911da177e4SLinus Torvalds * refcount elevated by one when they're in an LRU. A buffer can only appear
12921da177e4SLinus Torvalds * once in a particular CPU's LRU. A single buffer can be present in multiple
12931da177e4SLinus Torvalds * CPU's LRUs at the same time.
12941da177e4SLinus Torvalds *
12951da177e4SLinus Torvalds * This is a transparent caching front-end to sb_bread(), sb_getblk() and
12961da177e4SLinus Torvalds * sb_find_get_block().
12971da177e4SLinus Torvalds *
12981da177e4SLinus Torvalds * The LRUs themselves only need locking against invalidate_bh_lrus. We use
12991da177e4SLinus Torvalds * a local interrupt disable for that.
13001da177e4SLinus Torvalds */
13011da177e4SLinus Torvalds
130286cf78d7SSebastien Buisson #define BH_LRU_SIZE 16
13031da177e4SLinus Torvalds
13041da177e4SLinus Torvalds struct bh_lru {
13051da177e4SLinus Torvalds struct buffer_head *bhs[BH_LRU_SIZE];
13061da177e4SLinus Torvalds };
13071da177e4SLinus Torvalds
13081da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
13091da177e4SLinus Torvalds
13101da177e4SLinus Torvalds #ifdef CONFIG_SMP
13111da177e4SLinus Torvalds #define bh_lru_lock() local_irq_disable()
13121da177e4SLinus Torvalds #define bh_lru_unlock() local_irq_enable()
13131da177e4SLinus Torvalds #else
13141da177e4SLinus Torvalds #define bh_lru_lock() preempt_disable()
13151da177e4SLinus Torvalds #define bh_lru_unlock() preempt_enable()
13161da177e4SLinus Torvalds #endif
13171da177e4SLinus Torvalds
check_irqs_on(void)13181da177e4SLinus Torvalds static inline void check_irqs_on(void)
13191da177e4SLinus Torvalds {
13201da177e4SLinus Torvalds #ifdef irqs_disabled
13211da177e4SLinus Torvalds BUG_ON(irqs_disabled());
13221da177e4SLinus Torvalds #endif
13231da177e4SLinus Torvalds }
13241da177e4SLinus Torvalds
13251da177e4SLinus Torvalds /*
1326241f01fbSEric Biggers * Install a buffer_head into this cpu's LRU. If not already in the LRU, it is
1327241f01fbSEric Biggers * inserted at the front, and the buffer_head at the back if any is evicted.
1328241f01fbSEric Biggers * Or, if already in the LRU it is moved to the front.
13291da177e4SLinus Torvalds */
bh_lru_install(struct buffer_head * bh)13301da177e4SLinus Torvalds static void bh_lru_install(struct buffer_head *bh)
13311da177e4SLinus Torvalds {
1332241f01fbSEric Biggers struct buffer_head *evictee = bh;
1333241f01fbSEric Biggers struct bh_lru *b;
1334241f01fbSEric Biggers int i;
13351da177e4SLinus Torvalds
13361da177e4SLinus Torvalds check_irqs_on();
1337c0226eb8SMinchan Kim bh_lru_lock();
1338c0226eb8SMinchan Kim
13398cc621d2SMinchan Kim /*
13408cc621d2SMinchan Kim * the refcount of buffer_head in bh_lru prevents dropping the
13418cc621d2SMinchan Kim * attached page(i.e., try_to_free_buffers) so it could cause
13428cc621d2SMinchan Kim * failing page migration.
13438cc621d2SMinchan Kim * Skip putting upcoming bh into bh_lru until migration is done.
13448cc621d2SMinchan Kim */
13458a237adfSMarcelo Tosatti if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
1346c0226eb8SMinchan Kim bh_lru_unlock();
13478cc621d2SMinchan Kim return;
1348c0226eb8SMinchan Kim }
1349241f01fbSEric Biggers
1350241f01fbSEric Biggers b = this_cpu_ptr(&bh_lrus);
1351241f01fbSEric Biggers for (i = 0; i < BH_LRU_SIZE; i++) {
1352241f01fbSEric Biggers swap(evictee, b->bhs[i]);
1353241f01fbSEric Biggers if (evictee == bh) {
1354241f01fbSEric Biggers bh_lru_unlock();
1355241f01fbSEric Biggers return;
1356241f01fbSEric Biggers }
1357241f01fbSEric Biggers }
13581da177e4SLinus Torvalds
13591da177e4SLinus Torvalds get_bh(bh);
13601da177e4SLinus Torvalds bh_lru_unlock();
1361241f01fbSEric Biggers brelse(evictee);
13621da177e4SLinus Torvalds }
13631da177e4SLinus Torvalds
13641da177e4SLinus Torvalds /*
13651da177e4SLinus Torvalds * Look up the bh in this cpu's LRU. If it's there, move it to the head.
13661da177e4SLinus Torvalds */
1367858119e1SArjan van de Ven static struct buffer_head *
lookup_bh_lru(struct block_device * bdev,sector_t block,unsigned size)13683991d3bdSTomasz Kvarsin lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
13691da177e4SLinus Torvalds {
13701da177e4SLinus Torvalds struct buffer_head *ret = NULL;
13713991d3bdSTomasz Kvarsin unsigned int i;
13721da177e4SLinus Torvalds
13731da177e4SLinus Torvalds check_irqs_on();
13741da177e4SLinus Torvalds bh_lru_lock();
13758a237adfSMarcelo Tosatti if (cpu_is_isolated(smp_processor_id())) {
13768a237adfSMarcelo Tosatti bh_lru_unlock();
13778a237adfSMarcelo Tosatti return NULL;
13788a237adfSMarcelo Tosatti }
13791da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) {
1380c7b92516SChristoph Lameter struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
13811da177e4SLinus Torvalds
13829470dd5dSZach Brown if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
13839470dd5dSZach Brown bh->b_size == size) {
13841da177e4SLinus Torvalds if (i) {
13851da177e4SLinus Torvalds while (i) {
1386c7b92516SChristoph Lameter __this_cpu_write(bh_lrus.bhs[i],
1387c7b92516SChristoph Lameter __this_cpu_read(bh_lrus.bhs[i - 1]));
13881da177e4SLinus Torvalds i--;
13891da177e4SLinus Torvalds }
1390c7b92516SChristoph Lameter __this_cpu_write(bh_lrus.bhs[0], bh);
13911da177e4SLinus Torvalds }
13921da177e4SLinus Torvalds get_bh(bh);
13931da177e4SLinus Torvalds ret = bh;
13941da177e4SLinus Torvalds break;
13951da177e4SLinus Torvalds }
13961da177e4SLinus Torvalds }
13971da177e4SLinus Torvalds bh_lru_unlock();
13981da177e4SLinus Torvalds return ret;
13991da177e4SLinus Torvalds }
14001da177e4SLinus Torvalds
14011da177e4SLinus Torvalds /*
14021da177e4SLinus Torvalds * Perform a pagecache lookup for the matching buffer. If it's there, refresh
14031da177e4SLinus Torvalds * it in the LRU and mark it as accessed. If it is not present then return
14041da177e4SLinus Torvalds * NULL
14051da177e4SLinus Torvalds */
14061da177e4SLinus Torvalds struct buffer_head *
__find_get_block(struct block_device * bdev,sector_t block,unsigned size)14073991d3bdSTomasz Kvarsin __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
14081da177e4SLinus Torvalds {
14091da177e4SLinus Torvalds struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
14101da177e4SLinus Torvalds
14111da177e4SLinus Torvalds if (bh == NULL) {
14122457aec6SMel Gorman /* __find_get_block_slow will mark the page accessed */
1413385fd4c5SCoywolf Qi Hunt bh = __find_get_block_slow(bdev, block);
14141da177e4SLinus Torvalds if (bh)
14151da177e4SLinus Torvalds bh_lru_install(bh);
14162457aec6SMel Gorman } else
14171da177e4SLinus Torvalds touch_buffer(bh);
14182457aec6SMel Gorman
14191da177e4SLinus Torvalds return bh;
14201da177e4SLinus Torvalds }
14211da177e4SLinus Torvalds EXPORT_SYMBOL(__find_get_block);
14221da177e4SLinus Torvalds
14231da177e4SLinus Torvalds /*
14243b5e6454SGioh Kim * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
14251da177e4SLinus Torvalds * which corresponds to the passed block_device, block and size. The
14261da177e4SLinus Torvalds * returned buffer has its reference count incremented.
14271da177e4SLinus Torvalds *
14283b5e6454SGioh Kim * __getblk_gfp() will lock up the machine if grow_dev_page's
14293b5e6454SGioh Kim * try_to_free_buffers() attempt is failing. FIXME, perhaps?
14301da177e4SLinus Torvalds */
14311da177e4SLinus Torvalds struct buffer_head *
__getblk_gfp(struct block_device * bdev,sector_t block,unsigned size,gfp_t gfp)14323b5e6454SGioh Kim __getblk_gfp(struct block_device *bdev, sector_t block,
14333b5e6454SGioh Kim unsigned size, gfp_t gfp)
14341da177e4SLinus Torvalds {
14351da177e4SLinus Torvalds struct buffer_head *bh = __find_get_block(bdev, block, size);
14361da177e4SLinus Torvalds
14371da177e4SLinus Torvalds might_sleep();
14381da177e4SLinus Torvalds if (bh == NULL)
14393b5e6454SGioh Kim bh = __getblk_slow(bdev, block, size, gfp);
14401da177e4SLinus Torvalds return bh;
14411da177e4SLinus Torvalds }
14423b5e6454SGioh Kim EXPORT_SYMBOL(__getblk_gfp);
14431da177e4SLinus Torvalds
14441da177e4SLinus Torvalds /*
14451da177e4SLinus Torvalds * Do async read-ahead on a buffer..
14461da177e4SLinus Torvalds */
__breadahead(struct block_device * bdev,sector_t block,unsigned size)14473991d3bdSTomasz Kvarsin void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
14481da177e4SLinus Torvalds {
14491da177e4SLinus Torvalds struct buffer_head *bh = __getblk(bdev, block, size);
1450a3e713b5SAndrew Morton if (likely(bh)) {
1451e7ea1129SZhang Yi bh_readahead(bh, REQ_RAHEAD);
14521da177e4SLinus Torvalds brelse(bh);
14531da177e4SLinus Torvalds }
1454a3e713b5SAndrew Morton }
14551da177e4SLinus Torvalds EXPORT_SYMBOL(__breadahead);
14561da177e4SLinus Torvalds
14571da177e4SLinus Torvalds /**
14583b5e6454SGioh Kim * __bread_gfp() - reads a specified block and returns the bh
145967be2dd1SMartin Waitz * @bdev: the block_device to read from
14601da177e4SLinus Torvalds * @block: number of block
14611da177e4SLinus Torvalds * @size: size (in bytes) to read
14623b5e6454SGioh Kim * @gfp: page allocation flag
14631da177e4SLinus Torvalds *
14641da177e4SLinus Torvalds * Reads a specified block, and returns buffer head that contains it.
14653b5e6454SGioh Kim * The page cache can be allocated from non-movable area
14663b5e6454SGioh Kim * not to prevent page migration if you set gfp to zero.
14671da177e4SLinus Torvalds * It returns NULL if the block was unreadable.
14681da177e4SLinus Torvalds */
14691da177e4SLinus Torvalds struct buffer_head *
__bread_gfp(struct block_device * bdev,sector_t block,unsigned size,gfp_t gfp)14703b5e6454SGioh Kim __bread_gfp(struct block_device *bdev, sector_t block,
14713b5e6454SGioh Kim unsigned size, gfp_t gfp)
14721da177e4SLinus Torvalds {
14733b5e6454SGioh Kim struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
14741da177e4SLinus Torvalds
1475a3e713b5SAndrew Morton if (likely(bh) && !buffer_uptodate(bh))
14761da177e4SLinus Torvalds bh = __bread_slow(bh);
14771da177e4SLinus Torvalds return bh;
14781da177e4SLinus Torvalds }
14793b5e6454SGioh Kim EXPORT_SYMBOL(__bread_gfp);
14801da177e4SLinus Torvalds
__invalidate_bh_lrus(struct bh_lru * b)14818cc621d2SMinchan Kim static void __invalidate_bh_lrus(struct bh_lru *b)
14828cc621d2SMinchan Kim {
14838cc621d2SMinchan Kim int i;
14848cc621d2SMinchan Kim
14858cc621d2SMinchan Kim for (i = 0; i < BH_LRU_SIZE; i++) {
14868cc621d2SMinchan Kim brelse(b->bhs[i]);
14878cc621d2SMinchan Kim b->bhs[i] = NULL;
14888cc621d2SMinchan Kim }
14898cc621d2SMinchan Kim }
14901da177e4SLinus Torvalds /*
14911da177e4SLinus Torvalds * invalidate_bh_lrus() is called rarely - but not only at unmount.
14921da177e4SLinus Torvalds * This doesn't race because it runs in each cpu either in irq
14931da177e4SLinus Torvalds * or with preempt disabled.
14941da177e4SLinus Torvalds */
invalidate_bh_lru(void * arg)14951da177e4SLinus Torvalds static void invalidate_bh_lru(void *arg)
14961da177e4SLinus Torvalds {
14971da177e4SLinus Torvalds struct bh_lru *b = &get_cpu_var(bh_lrus);
14981da177e4SLinus Torvalds
14998cc621d2SMinchan Kim __invalidate_bh_lrus(b);
15001da177e4SLinus Torvalds put_cpu_var(bh_lrus);
15011da177e4SLinus Torvalds }
15021da177e4SLinus Torvalds
has_bh_in_lru(int cpu,void * dummy)15038cc621d2SMinchan Kim bool has_bh_in_lru(int cpu, void *dummy)
150442be35d0SGilad Ben-Yossef {
150542be35d0SGilad Ben-Yossef struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
150642be35d0SGilad Ben-Yossef int i;
150742be35d0SGilad Ben-Yossef
150842be35d0SGilad Ben-Yossef for (i = 0; i < BH_LRU_SIZE; i++) {
150942be35d0SGilad Ben-Yossef if (b->bhs[i])
15101d706679SSaurav Girepunje return true;
151142be35d0SGilad Ben-Yossef }
151242be35d0SGilad Ben-Yossef
15131d706679SSaurav Girepunje return false;
151442be35d0SGilad Ben-Yossef }
151542be35d0SGilad Ben-Yossef
invalidate_bh_lrus(void)1516f9a14399SPeter Zijlstra void invalidate_bh_lrus(void)
15171da177e4SLinus Torvalds {
1518cb923159SSebastian Andrzej Siewior on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
15191da177e4SLinus Torvalds }
15209db5579bSNick Piggin EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
15211da177e4SLinus Torvalds
1522243418e3SMinchan Kim /*
1523243418e3SMinchan Kim * It's called from workqueue context so we need a bh_lru_lock to close
1524243418e3SMinchan Kim * the race with preemption/irq.
1525243418e3SMinchan Kim */
invalidate_bh_lrus_cpu(void)1526243418e3SMinchan Kim void invalidate_bh_lrus_cpu(void)
15278cc621d2SMinchan Kim {
15288cc621d2SMinchan Kim struct bh_lru *b;
15298cc621d2SMinchan Kim
15308cc621d2SMinchan Kim bh_lru_lock();
1531243418e3SMinchan Kim b = this_cpu_ptr(&bh_lrus);
15328cc621d2SMinchan Kim __invalidate_bh_lrus(b);
15338cc621d2SMinchan Kim bh_lru_unlock();
15348cc621d2SMinchan Kim }
15358cc621d2SMinchan Kim
folio_set_bh(struct buffer_head * bh,struct folio * folio,unsigned long offset)1536465e5e6aSPankaj Raghav void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1537465e5e6aSPankaj Raghav unsigned long offset)
1538465e5e6aSPankaj Raghav {
1539465e5e6aSPankaj Raghav bh->b_folio = folio;
1540465e5e6aSPankaj Raghav BUG_ON(offset >= folio_size(folio));
1541465e5e6aSPankaj Raghav if (folio_test_highmem(folio))
1542465e5e6aSPankaj Raghav /*
1543465e5e6aSPankaj Raghav * This catches illegal uses and preserves the offset:
1544465e5e6aSPankaj Raghav */
1545465e5e6aSPankaj Raghav bh->b_data = (char *)(0 + offset);
1546465e5e6aSPankaj Raghav else
1547465e5e6aSPankaj Raghav bh->b_data = folio_address(folio) + offset;
1548465e5e6aSPankaj Raghav }
1549465e5e6aSPankaj Raghav EXPORT_SYMBOL(folio_set_bh);
1550465e5e6aSPankaj Raghav
15511da177e4SLinus Torvalds /*
15521da177e4SLinus Torvalds * Called when truncating a buffer on a page completely.
15531da177e4SLinus Torvalds */
1554e7470ee8SMel Gorman
1555e7470ee8SMel Gorman /* Bits that are cleared during an invalidate */
1556e7470ee8SMel Gorman #define BUFFER_FLAGS_DISCARD \
1557e7470ee8SMel Gorman (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1558e7470ee8SMel Gorman 1 << BH_Delay | 1 << BH_Unwritten)
1559e7470ee8SMel Gorman
discard_buffer(struct buffer_head * bh)1560858119e1SArjan van de Ven static void discard_buffer(struct buffer_head * bh)
15611da177e4SLinus Torvalds {
1562b0192296SUros Bizjak unsigned long b_state;
1563e7470ee8SMel Gorman
15641da177e4SLinus Torvalds lock_buffer(bh);
15651da177e4SLinus Torvalds clear_buffer_dirty(bh);
15661da177e4SLinus Torvalds bh->b_bdev = NULL;
1567b0192296SUros Bizjak b_state = READ_ONCE(bh->b_state);
1568b0192296SUros Bizjak do {
1569b0192296SUros Bizjak } while (!try_cmpxchg(&bh->b_state, &b_state,
1570b0192296SUros Bizjak b_state & ~BUFFER_FLAGS_DISCARD));
15711da177e4SLinus Torvalds unlock_buffer(bh);
15721da177e4SLinus Torvalds }
15731da177e4SLinus Torvalds
15741da177e4SLinus Torvalds /**
15757ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
15767ba13abbSMatthew Wilcox (Oracle) * @folio: The folio which is affected.
1577d47992f8SLukas Czerner * @offset: start of the range to invalidate
1578d47992f8SLukas Czerner * @length: length of the range to invalidate
15791da177e4SLinus Torvalds *
15807ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio() is called when all or part of the folio has been
15811da177e4SLinus Torvalds * invalidated by a truncate operation.
15821da177e4SLinus Torvalds *
15837ba13abbSMatthew Wilcox (Oracle) * block_invalidate_folio() does not have to release all buffers, but it must
15841da177e4SLinus Torvalds * ensure that no dirty buffer is left outside @offset and that no I/O
15851da177e4SLinus Torvalds * is underway against any of the blocks which are outside the truncation
15861da177e4SLinus Torvalds * point. Because the caller is about to free (and possibly reuse) those
15871da177e4SLinus Torvalds * blocks on-disk.
15881da177e4SLinus Torvalds */
block_invalidate_folio(struct folio * folio,size_t offset,size_t length)15897ba13abbSMatthew Wilcox (Oracle) void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
15901da177e4SLinus Torvalds {
15911da177e4SLinus Torvalds struct buffer_head *head, *bh, *next;
15927ba13abbSMatthew Wilcox (Oracle) size_t curr_off = 0;
15937ba13abbSMatthew Wilcox (Oracle) size_t stop = length + offset;
15941da177e4SLinus Torvalds
15957ba13abbSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio));
15961da177e4SLinus Torvalds
1597d47992f8SLukas Czerner /*
1598d47992f8SLukas Czerner * Check for overflow
1599d47992f8SLukas Czerner */
16007ba13abbSMatthew Wilcox (Oracle) BUG_ON(stop > folio_size(folio) || stop < length);
1601d47992f8SLukas Czerner
16027ba13abbSMatthew Wilcox (Oracle) head = folio_buffers(folio);
16037ba13abbSMatthew Wilcox (Oracle) if (!head)
16047ba13abbSMatthew Wilcox (Oracle) return;
16057ba13abbSMatthew Wilcox (Oracle)
16061da177e4SLinus Torvalds bh = head;
16071da177e4SLinus Torvalds do {
16087ba13abbSMatthew Wilcox (Oracle) size_t next_off = curr_off + bh->b_size;
16091da177e4SLinus Torvalds next = bh->b_this_page;
16101da177e4SLinus Torvalds
16111da177e4SLinus Torvalds /*
1612d47992f8SLukas Czerner * Are we still fully in range ?
1613d47992f8SLukas Czerner */
1614d47992f8SLukas Czerner if (next_off > stop)
1615d47992f8SLukas Czerner goto out;
1616d47992f8SLukas Czerner
1617d47992f8SLukas Czerner /*
16181da177e4SLinus Torvalds * is this block fully invalidated?
16191da177e4SLinus Torvalds */
16201da177e4SLinus Torvalds if (offset <= curr_off)
16211da177e4SLinus Torvalds discard_buffer(bh);
16221da177e4SLinus Torvalds curr_off = next_off;
16231da177e4SLinus Torvalds bh = next;
16241da177e4SLinus Torvalds } while (bh != head);
16251da177e4SLinus Torvalds
16261da177e4SLinus Torvalds /*
16277ba13abbSMatthew Wilcox (Oracle) * We release buffers only if the entire folio is being invalidated.
16281da177e4SLinus Torvalds * The get_block cached value has been unconditionally invalidated,
16291da177e4SLinus Torvalds * so real IO is not possible anymore.
16301da177e4SLinus Torvalds */
16317ba13abbSMatthew Wilcox (Oracle) if (length == folio_size(folio))
16327ba13abbSMatthew Wilcox (Oracle) filemap_release_folio(folio, 0);
16331da177e4SLinus Torvalds out:
16342ff28e22SNeilBrown return;
16351da177e4SLinus Torvalds }
16367ba13abbSMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_invalidate_folio);
16371da177e4SLinus Torvalds
16381da177e4SLinus Torvalds /*
16391da177e4SLinus Torvalds * We attach and possibly dirty the buffers atomically wrt
1640e621900aSMatthew Wilcox (Oracle) * block_dirty_folio() via private_lock. try_to_free_buffers
16418e2e1756SPankaj Raghav * is already excluded via the folio lock.
16421da177e4SLinus Torvalds */
folio_create_empty_buffers(struct folio * folio,unsigned long blocksize,unsigned long b_state)1643*724dc6daSMatthew Wilcox (Oracle) struct buffer_head *folio_create_empty_buffers(struct folio *folio,
1644*724dc6daSMatthew Wilcox (Oracle) unsigned long blocksize, unsigned long b_state)
16451da177e4SLinus Torvalds {
16461da177e4SLinus Torvalds struct buffer_head *bh, *head, *tail;
16471da177e4SLinus Torvalds
16488e2e1756SPankaj Raghav head = folio_alloc_buffers(folio, blocksize, true);
16491da177e4SLinus Torvalds bh = head;
16501da177e4SLinus Torvalds do {
16511da177e4SLinus Torvalds bh->b_state |= b_state;
16521da177e4SLinus Torvalds tail = bh;
16531da177e4SLinus Torvalds bh = bh->b_this_page;
16541da177e4SLinus Torvalds } while (bh);
16551da177e4SLinus Torvalds tail->b_this_page = head;
16561da177e4SLinus Torvalds
16578e2e1756SPankaj Raghav spin_lock(&folio->mapping->private_lock);
16588e2e1756SPankaj Raghav if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
16591da177e4SLinus Torvalds bh = head;
16601da177e4SLinus Torvalds do {
16618e2e1756SPankaj Raghav if (folio_test_dirty(folio))
16621da177e4SLinus Torvalds set_buffer_dirty(bh);
16638e2e1756SPankaj Raghav if (folio_test_uptodate(folio))
16641da177e4SLinus Torvalds set_buffer_uptodate(bh);
16651da177e4SLinus Torvalds bh = bh->b_this_page;
16661da177e4SLinus Torvalds } while (bh != head);
16671da177e4SLinus Torvalds }
16688e2e1756SPankaj Raghav folio_attach_private(folio, head);
16698e2e1756SPankaj Raghav spin_unlock(&folio->mapping->private_lock);
1670*724dc6daSMatthew Wilcox (Oracle)
1671*724dc6daSMatthew Wilcox (Oracle) return head;
16728e2e1756SPankaj Raghav }
16738e2e1756SPankaj Raghav EXPORT_SYMBOL(folio_create_empty_buffers);
16748e2e1756SPankaj Raghav
create_empty_buffers(struct page * page,unsigned long blocksize,unsigned long b_state)16758e2e1756SPankaj Raghav void create_empty_buffers(struct page *page,
16768e2e1756SPankaj Raghav unsigned long blocksize, unsigned long b_state)
16778e2e1756SPankaj Raghav {
16788e2e1756SPankaj Raghav folio_create_empty_buffers(page_folio(page), blocksize, b_state);
16791da177e4SLinus Torvalds }
16801da177e4SLinus Torvalds EXPORT_SYMBOL(create_empty_buffers);
16811da177e4SLinus Torvalds
168229f3ad7dSJan Kara /**
168329f3ad7dSJan Kara * clean_bdev_aliases: clean a range of buffers in block device
168429f3ad7dSJan Kara * @bdev: Block device to clean buffers in
168529f3ad7dSJan Kara * @block: Start of a range of blocks to clean
168629f3ad7dSJan Kara * @len: Number of blocks to clean
16871da177e4SLinus Torvalds *
168829f3ad7dSJan Kara * We are taking a range of blocks for data and we don't want writeback of any
168929f3ad7dSJan Kara * buffer-cache aliases starting from return from this function and until the
169029f3ad7dSJan Kara * moment when something will explicitly mark the buffer dirty (hopefully that
169129f3ad7dSJan Kara * will not happen until we will free that block ;-) We don't even need to mark
169229f3ad7dSJan Kara * it not-uptodate - nobody can expect anything from a newly allocated buffer
169329f3ad7dSJan Kara * anyway. We used to use unmap_buffer() for such invalidation, but that was
169429f3ad7dSJan Kara * wrong. We definitely don't want to mark the alias unmapped, for example - it
169529f3ad7dSJan Kara * would confuse anyone who might pick it with bread() afterwards...
169629f3ad7dSJan Kara *
169729f3ad7dSJan Kara * Also.. Note that bforget() doesn't lock the buffer. So there can be
169829f3ad7dSJan Kara * writeout I/O going on against recently-freed buffers. We don't wait on that
169929f3ad7dSJan Kara * I/O in bforget() - it's more efficient to wait on the I/O only if we really
170029f3ad7dSJan Kara * need to. That happens here.
17011da177e4SLinus Torvalds */
clean_bdev_aliases(struct block_device * bdev,sector_t block,sector_t len)170229f3ad7dSJan Kara void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
17031da177e4SLinus Torvalds {
170429f3ad7dSJan Kara struct inode *bd_inode = bdev->bd_inode;
170529f3ad7dSJan Kara struct address_space *bd_mapping = bd_inode->i_mapping;
17069e0b6f31SMatthew Wilcox (Oracle) struct folio_batch fbatch;
170729f3ad7dSJan Kara pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
170829f3ad7dSJan Kara pgoff_t end;
1709c10f778dSJan Kara int i, count;
171029f3ad7dSJan Kara struct buffer_head *bh;
171129f3ad7dSJan Kara struct buffer_head *head;
17121da177e4SLinus Torvalds
171329f3ad7dSJan Kara end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
17149e0b6f31SMatthew Wilcox (Oracle) folio_batch_init(&fbatch);
17159e0b6f31SMatthew Wilcox (Oracle) while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
17169e0b6f31SMatthew Wilcox (Oracle) count = folio_batch_count(&fbatch);
1717c10f778dSJan Kara for (i = 0; i < count; i++) {
17189e0b6f31SMatthew Wilcox (Oracle) struct folio *folio = fbatch.folios[i];
17191da177e4SLinus Torvalds
17209e0b6f31SMatthew Wilcox (Oracle) if (!folio_buffers(folio))
172129f3ad7dSJan Kara continue;
172229f3ad7dSJan Kara /*
17239e0b6f31SMatthew Wilcox (Oracle) * We use folio lock instead of bd_mapping->private_lock
172429f3ad7dSJan Kara * to pin buffers here since we can afford to sleep and
172529f3ad7dSJan Kara * it scales better than a global spinlock lock.
172629f3ad7dSJan Kara */
17279e0b6f31SMatthew Wilcox (Oracle) folio_lock(folio);
17289e0b6f31SMatthew Wilcox (Oracle) /* Recheck when the folio is locked which pins bhs */
17299e0b6f31SMatthew Wilcox (Oracle) head = folio_buffers(folio);
17309e0b6f31SMatthew Wilcox (Oracle) if (!head)
173129f3ad7dSJan Kara goto unlock_page;
173229f3ad7dSJan Kara bh = head;
173329f3ad7dSJan Kara do {
17346c006a9dSChandan Rajendra if (!buffer_mapped(bh) || (bh->b_blocknr < block))
173529f3ad7dSJan Kara goto next;
173629f3ad7dSJan Kara if (bh->b_blocknr >= block + len)
173729f3ad7dSJan Kara break;
173829f3ad7dSJan Kara clear_buffer_dirty(bh);
173929f3ad7dSJan Kara wait_on_buffer(bh);
174029f3ad7dSJan Kara clear_buffer_req(bh);
174129f3ad7dSJan Kara next:
174229f3ad7dSJan Kara bh = bh->b_this_page;
174329f3ad7dSJan Kara } while (bh != head);
174429f3ad7dSJan Kara unlock_page:
17459e0b6f31SMatthew Wilcox (Oracle) folio_unlock(folio);
174629f3ad7dSJan Kara }
17479e0b6f31SMatthew Wilcox (Oracle) folio_batch_release(&fbatch);
174829f3ad7dSJan Kara cond_resched();
1749c10f778dSJan Kara /* End of range already reached? */
1750c10f778dSJan Kara if (index > end || !index)
1751c10f778dSJan Kara break;
17521da177e4SLinus Torvalds }
17531da177e4SLinus Torvalds }
175429f3ad7dSJan Kara EXPORT_SYMBOL(clean_bdev_aliases);
17551da177e4SLinus Torvalds
17561da177e4SLinus Torvalds /*
175745bce8f3SLinus Torvalds * Size is a power-of-two in the range 512..PAGE_SIZE,
175845bce8f3SLinus Torvalds * and the case we care about most is PAGE_SIZE.
175945bce8f3SLinus Torvalds *
176045bce8f3SLinus Torvalds * So this *could* possibly be written with those
176145bce8f3SLinus Torvalds * constraints in mind (relevant mostly if some
176245bce8f3SLinus Torvalds * architecture has a slow bit-scan instruction)
176345bce8f3SLinus Torvalds */
block_size_bits(unsigned int blocksize)176445bce8f3SLinus Torvalds static inline int block_size_bits(unsigned int blocksize)
176545bce8f3SLinus Torvalds {
176645bce8f3SLinus Torvalds return ilog2(blocksize);
176745bce8f3SLinus Torvalds }
176845bce8f3SLinus Torvalds
folio_create_buffers(struct folio * folio,struct inode * inode,unsigned int b_state)1769c6c8c3e7SPankaj Raghav static struct buffer_head *folio_create_buffers(struct folio *folio,
1770c6c8c3e7SPankaj Raghav struct inode *inode,
1771c6c8c3e7SPankaj Raghav unsigned int b_state)
177245bce8f3SLinus Torvalds {
1773*724dc6daSMatthew Wilcox (Oracle) struct buffer_head *bh;
1774*724dc6daSMatthew Wilcox (Oracle)
1775c6c8c3e7SPankaj Raghav BUG_ON(!folio_test_locked(folio));
177645bce8f3SLinus Torvalds
1777*724dc6daSMatthew Wilcox (Oracle) bh = folio_buffers(folio);
1778*724dc6daSMatthew Wilcox (Oracle) if (!bh)
1779*724dc6daSMatthew Wilcox (Oracle) bh = folio_create_empty_buffers(folio,
1780*724dc6daSMatthew Wilcox (Oracle) 1 << READ_ONCE(inode->i_blkbits), b_state);
1781*724dc6daSMatthew Wilcox (Oracle) return bh;
178245bce8f3SLinus Torvalds }
178345bce8f3SLinus Torvalds
178445bce8f3SLinus Torvalds /*
17851da177e4SLinus Torvalds * NOTE! All mapped/uptodate combinations are valid:
17861da177e4SLinus Torvalds *
17871da177e4SLinus Torvalds * Mapped Uptodate Meaning
17881da177e4SLinus Torvalds *
17891da177e4SLinus Torvalds * No No "unknown" - must do get_block()
17901da177e4SLinus Torvalds * No Yes "hole" - zero-filled
17911da177e4SLinus Torvalds * Yes No "allocated" - allocated on disk, not read in
17921da177e4SLinus Torvalds * Yes Yes "valid" - allocated and up-to-date in memory.
17931da177e4SLinus Torvalds *
17941da177e4SLinus Torvalds * "Dirty" is valid only with the last case (mapped+uptodate).
17951da177e4SLinus Torvalds */
17961da177e4SLinus Torvalds
17971da177e4SLinus Torvalds /*
17981da177e4SLinus Torvalds * While block_write_full_page is writing back the dirty buffers under
17991da177e4SLinus Torvalds * the page lock, whoever dirtied the buffers may decide to clean them
18001da177e4SLinus Torvalds * again at any time. We handle that by only looking at the buffer
18011da177e4SLinus Torvalds * state inside lock_buffer().
18021da177e4SLinus Torvalds *
18031da177e4SLinus Torvalds * If block_write_full_page() is called for regular writeback
18041da177e4SLinus Torvalds * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
18051da177e4SLinus Torvalds * locked buffer. This only can happen if someone has written the buffer
18061da177e4SLinus Torvalds * directly, with submit_bh(). At the address_space level PageWriteback
18071da177e4SLinus Torvalds * prevents this contention from occurring.
18086e34eeddSTheodore Ts'o *
18096e34eeddSTheodore Ts'o * If block_write_full_page() is called with wbc->sync_mode ==
181070fd7614SChristoph Hellwig * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1811721a9602SJens Axboe * causes the writes to be flagged as synchronous writes.
18121da177e4SLinus Torvalds */
__block_write_full_folio(struct inode * inode,struct folio * folio,get_block_t * get_block,struct writeback_control * wbc,bh_end_io_t * handler)181353418a18SMatthew Wilcox (Oracle) int __block_write_full_folio(struct inode *inode, struct folio *folio,
181435c80d5fSChris Mason get_block_t *get_block, struct writeback_control *wbc,
181535c80d5fSChris Mason bh_end_io_t *handler)
18161da177e4SLinus Torvalds {
18171da177e4SLinus Torvalds int err;
18181da177e4SLinus Torvalds sector_t block;
18191da177e4SLinus Torvalds sector_t last_block;
1820f0fbd5fcSAndrew Morton struct buffer_head *bh, *head;
182145bce8f3SLinus Torvalds unsigned int blocksize, bbits;
18221da177e4SLinus Torvalds int nr_underway = 0;
18233ae72869SBart Van Assche blk_opf_t write_flags = wbc_to_write_flags(wbc);
18241da177e4SLinus Torvalds
182553418a18SMatthew Wilcox (Oracle) head = folio_create_buffers(folio, inode,
18261da177e4SLinus Torvalds (1 << BH_Dirty) | (1 << BH_Uptodate));
18271da177e4SLinus Torvalds
18281da177e4SLinus Torvalds /*
1829e621900aSMatthew Wilcox (Oracle) * Be very careful. We have no exclusion from block_dirty_folio
18301da177e4SLinus Torvalds * here, and the (potentially unmapped) buffers may become dirty at
18311da177e4SLinus Torvalds * any time. If a buffer becomes dirty here after we've inspected it
183253418a18SMatthew Wilcox (Oracle) * then we just miss that fact, and the folio stays dirty.
18331da177e4SLinus Torvalds *
1834e621900aSMatthew Wilcox (Oracle) * Buffers outside i_size may be dirtied by block_dirty_folio;
18351da177e4SLinus Torvalds * handle that here by just cleaning them.
18361da177e4SLinus Torvalds */
18371da177e4SLinus Torvalds
18381da177e4SLinus Torvalds bh = head;
183945bce8f3SLinus Torvalds blocksize = bh->b_size;
184045bce8f3SLinus Torvalds bbits = block_size_bits(blocksize);
184145bce8f3SLinus Torvalds
184253418a18SMatthew Wilcox (Oracle) block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
184345bce8f3SLinus Torvalds last_block = (i_size_read(inode) - 1) >> bbits;
18441da177e4SLinus Torvalds
18451da177e4SLinus Torvalds /*
18461da177e4SLinus Torvalds * Get all the dirty buffers mapped to disk addresses and
18471da177e4SLinus Torvalds * handle any aliases from the underlying blockdev's mapping.
18481da177e4SLinus Torvalds */
18491da177e4SLinus Torvalds do {
18501da177e4SLinus Torvalds if (block > last_block) {
18511da177e4SLinus Torvalds /*
18521da177e4SLinus Torvalds * mapped buffers outside i_size will occur, because
185353418a18SMatthew Wilcox (Oracle) * this folio can be outside i_size when there is a
18541da177e4SLinus Torvalds * truncate in progress.
18551da177e4SLinus Torvalds */
18561da177e4SLinus Torvalds /*
18571da177e4SLinus Torvalds * The buffer was zeroed by block_write_full_page()
18581da177e4SLinus Torvalds */
18591da177e4SLinus Torvalds clear_buffer_dirty(bh);
18601da177e4SLinus Torvalds set_buffer_uptodate(bh);
186129a814d2SAlex Tomas } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
186229a814d2SAlex Tomas buffer_dirty(bh)) {
1863b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize);
18641da177e4SLinus Torvalds err = get_block(inode, block, bh, 1);
18651da177e4SLinus Torvalds if (err)
18661da177e4SLinus Torvalds goto recover;
186729a814d2SAlex Tomas clear_buffer_delay(bh);
18681da177e4SLinus Torvalds if (buffer_new(bh)) {
18691da177e4SLinus Torvalds /* blockdev mappings never come here */
18701da177e4SLinus Torvalds clear_buffer_new(bh);
1871e64855c6SJan Kara clean_bdev_bh_alias(bh);
18721da177e4SLinus Torvalds }
18731da177e4SLinus Torvalds }
18741da177e4SLinus Torvalds bh = bh->b_this_page;
18751da177e4SLinus Torvalds block++;
18761da177e4SLinus Torvalds } while (bh != head);
18771da177e4SLinus Torvalds
18781da177e4SLinus Torvalds do {
18791da177e4SLinus Torvalds if (!buffer_mapped(bh))
18801da177e4SLinus Torvalds continue;
18811da177e4SLinus Torvalds /*
18821da177e4SLinus Torvalds * If it's a fully non-blocking write attempt and we cannot
188353418a18SMatthew Wilcox (Oracle) * lock the buffer then redirty the folio. Note that this can
18845b0830cbSJens Axboe * potentially cause a busy-wait loop from writeback threads
18855b0830cbSJens Axboe * and kswapd activity, but those code paths have their own
18865b0830cbSJens Axboe * higher-level throttling.
18871da177e4SLinus Torvalds */
18881b430beeSWu Fengguang if (wbc->sync_mode != WB_SYNC_NONE) {
18891da177e4SLinus Torvalds lock_buffer(bh);
1890ca5de404SNick Piggin } else if (!trylock_buffer(bh)) {
189153418a18SMatthew Wilcox (Oracle) folio_redirty_for_writepage(wbc, folio);
18921da177e4SLinus Torvalds continue;
18931da177e4SLinus Torvalds }
18941da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) {
189535c80d5fSChris Mason mark_buffer_async_write_endio(bh, handler);
18961da177e4SLinus Torvalds } else {
18971da177e4SLinus Torvalds unlock_buffer(bh);
18981da177e4SLinus Torvalds }
18991da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head);
19001da177e4SLinus Torvalds
19011da177e4SLinus Torvalds /*
190253418a18SMatthew Wilcox (Oracle) * The folio and its buffers are protected by the writeback flag,
190353418a18SMatthew Wilcox (Oracle) * so we can drop the bh refcounts early.
19041da177e4SLinus Torvalds */
190553418a18SMatthew Wilcox (Oracle) BUG_ON(folio_test_writeback(folio));
190653418a18SMatthew Wilcox (Oracle) folio_start_writeback(folio);
19071da177e4SLinus Torvalds
19081da177e4SLinus Torvalds do {
19091da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page;
19101da177e4SLinus Torvalds if (buffer_async_write(bh)) {
19111420c4a5SBart Van Assche submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
19121da177e4SLinus Torvalds nr_underway++;
1913ad576e63SNick Piggin }
19141da177e4SLinus Torvalds bh = next;
19151da177e4SLinus Torvalds } while (bh != head);
191653418a18SMatthew Wilcox (Oracle) folio_unlock(folio);
19171da177e4SLinus Torvalds
19181da177e4SLinus Torvalds err = 0;
19191da177e4SLinus Torvalds done:
19201da177e4SLinus Torvalds if (nr_underway == 0) {
19211da177e4SLinus Torvalds /*
192253418a18SMatthew Wilcox (Oracle) * The folio was marked dirty, but the buffers were
19231da177e4SLinus Torvalds * clean. Someone wrote them back by hand with
192479f59784SZhang Yi * write_dirty_buffer/submit_bh. A rare case.
19251da177e4SLinus Torvalds */
192653418a18SMatthew Wilcox (Oracle) folio_end_writeback(folio);
19273d67f2d7SNick Piggin
19281da177e4SLinus Torvalds /*
192953418a18SMatthew Wilcox (Oracle) * The folio and buffer_heads can be released at any time from
19301da177e4SLinus Torvalds * here on.
19311da177e4SLinus Torvalds */
19321da177e4SLinus Torvalds }
19331da177e4SLinus Torvalds return err;
19341da177e4SLinus Torvalds
19351da177e4SLinus Torvalds recover:
19361da177e4SLinus Torvalds /*
19371da177e4SLinus Torvalds * ENOSPC, or some other error. We may already have added some
19381da177e4SLinus Torvalds * blocks to the file, so we need to write these out to avoid
19391da177e4SLinus Torvalds * exposing stale data.
194053418a18SMatthew Wilcox (Oracle) * The folio is currently locked and not marked for writeback
19411da177e4SLinus Torvalds */
19421da177e4SLinus Torvalds bh = head;
19431da177e4SLinus Torvalds /* Recovery: lock and submit the mapped buffers */
19441da177e4SLinus Torvalds do {
194529a814d2SAlex Tomas if (buffer_mapped(bh) && buffer_dirty(bh) &&
194629a814d2SAlex Tomas !buffer_delay(bh)) {
19471da177e4SLinus Torvalds lock_buffer(bh);
194835c80d5fSChris Mason mark_buffer_async_write_endio(bh, handler);
19491da177e4SLinus Torvalds } else {
19501da177e4SLinus Torvalds /*
19511da177e4SLinus Torvalds * The buffer may have been set dirty during
195253418a18SMatthew Wilcox (Oracle) * attachment to a dirty folio.
19531da177e4SLinus Torvalds */
19541da177e4SLinus Torvalds clear_buffer_dirty(bh);
19551da177e4SLinus Torvalds }
19561da177e4SLinus Torvalds } while ((bh = bh->b_this_page) != head);
195753418a18SMatthew Wilcox (Oracle) folio_set_error(folio);
195853418a18SMatthew Wilcox (Oracle) BUG_ON(folio_test_writeback(folio));
195953418a18SMatthew Wilcox (Oracle) mapping_set_error(folio->mapping, err);
196053418a18SMatthew Wilcox (Oracle) folio_start_writeback(folio);
19611da177e4SLinus Torvalds do {
19621da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page;
19631da177e4SLinus Torvalds if (buffer_async_write(bh)) {
19641da177e4SLinus Torvalds clear_buffer_dirty(bh);
19651420c4a5SBart Van Assche submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
19661da177e4SLinus Torvalds nr_underway++;
1967ad576e63SNick Piggin }
19681da177e4SLinus Torvalds bh = next;
19691da177e4SLinus Torvalds } while (bh != head);
197053418a18SMatthew Wilcox (Oracle) folio_unlock(folio);
19711da177e4SLinus Torvalds goto done;
19721da177e4SLinus Torvalds }
197353418a18SMatthew Wilcox (Oracle) EXPORT_SYMBOL(__block_write_full_folio);
19741da177e4SLinus Torvalds
1975afddba49SNick Piggin /*
19764a9622f2SMatthew Wilcox (Oracle) * If a folio has any new buffers, zero them out here, and mark them uptodate
1977afddba49SNick Piggin * and dirty so they'll be written out (in order to prevent uninitialised
1978afddba49SNick Piggin * block data from leaking). And clear the new bit.
1979afddba49SNick Piggin */
folio_zero_new_buffers(struct folio * folio,size_t from,size_t to)19804a9622f2SMatthew Wilcox (Oracle) void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
1981afddba49SNick Piggin {
19824a9622f2SMatthew Wilcox (Oracle) size_t block_start, block_end;
1983afddba49SNick Piggin struct buffer_head *head, *bh;
1984afddba49SNick Piggin
19854a9622f2SMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio));
19864a9622f2SMatthew Wilcox (Oracle) head = folio_buffers(folio);
19874a9622f2SMatthew Wilcox (Oracle) if (!head)
1988afddba49SNick Piggin return;
1989afddba49SNick Piggin
19904a9622f2SMatthew Wilcox (Oracle) bh = head;
1991afddba49SNick Piggin block_start = 0;
1992afddba49SNick Piggin do {
1993afddba49SNick Piggin block_end = block_start + bh->b_size;
1994afddba49SNick Piggin
1995afddba49SNick Piggin if (buffer_new(bh)) {
1996afddba49SNick Piggin if (block_end > from && block_start < to) {
19974a9622f2SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio)) {
19984a9622f2SMatthew Wilcox (Oracle) size_t start, xend;
1999afddba49SNick Piggin
2000afddba49SNick Piggin start = max(from, block_start);
20014a9622f2SMatthew Wilcox (Oracle) xend = min(to, block_end);
2002afddba49SNick Piggin
20034a9622f2SMatthew Wilcox (Oracle) folio_zero_segment(folio, start, xend);
2004afddba49SNick Piggin set_buffer_uptodate(bh);
2005afddba49SNick Piggin }
2006afddba49SNick Piggin
2007afddba49SNick Piggin clear_buffer_new(bh);
2008afddba49SNick Piggin mark_buffer_dirty(bh);
2009afddba49SNick Piggin }
2010afddba49SNick Piggin }
2011afddba49SNick Piggin
2012afddba49SNick Piggin block_start = block_end;
2013afddba49SNick Piggin bh = bh->b_this_page;
2014afddba49SNick Piggin } while (bh != head);
2015afddba49SNick Piggin }
20164a9622f2SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_zero_new_buffers);
2017afddba49SNick Piggin
20184aa8cdd5SChristoph Hellwig static int
iomap_to_bh(struct inode * inode,sector_t block,struct buffer_head * bh,const struct iomap * iomap)2019ae259a9cSChristoph Hellwig iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
20206d49cc85SChristoph Hellwig const struct iomap *iomap)
2021ae259a9cSChristoph Hellwig {
2022ae259a9cSChristoph Hellwig loff_t offset = block << inode->i_blkbits;
2023ae259a9cSChristoph Hellwig
2024ae259a9cSChristoph Hellwig bh->b_bdev = iomap->bdev;
2025ae259a9cSChristoph Hellwig
2026ae259a9cSChristoph Hellwig /*
2027ae259a9cSChristoph Hellwig * Block points to offset in file we need to map, iomap contains
2028ae259a9cSChristoph Hellwig * the offset at which the map starts. If the map ends before the
2029ae259a9cSChristoph Hellwig * current block, then do not map the buffer and let the caller
2030ae259a9cSChristoph Hellwig * handle it.
2031ae259a9cSChristoph Hellwig */
20324aa8cdd5SChristoph Hellwig if (offset >= iomap->offset + iomap->length)
20334aa8cdd5SChristoph Hellwig return -EIO;
2034ae259a9cSChristoph Hellwig
2035ae259a9cSChristoph Hellwig switch (iomap->type) {
2036ae259a9cSChristoph Hellwig case IOMAP_HOLE:
2037ae259a9cSChristoph Hellwig /*
2038ae259a9cSChristoph Hellwig * If the buffer is not up to date or beyond the current EOF,
2039ae259a9cSChristoph Hellwig * we need to mark it as new to ensure sub-block zeroing is
2040ae259a9cSChristoph Hellwig * executed if necessary.
2041ae259a9cSChristoph Hellwig */
2042ae259a9cSChristoph Hellwig if (!buffer_uptodate(bh) ||
2043ae259a9cSChristoph Hellwig (offset >= i_size_read(inode)))
2044ae259a9cSChristoph Hellwig set_buffer_new(bh);
20454aa8cdd5SChristoph Hellwig return 0;
2046ae259a9cSChristoph Hellwig case IOMAP_DELALLOC:
2047ae259a9cSChristoph Hellwig if (!buffer_uptodate(bh) ||
2048ae259a9cSChristoph Hellwig (offset >= i_size_read(inode)))
2049ae259a9cSChristoph Hellwig set_buffer_new(bh);
2050ae259a9cSChristoph Hellwig set_buffer_uptodate(bh);
2051ae259a9cSChristoph Hellwig set_buffer_mapped(bh);
2052ae259a9cSChristoph Hellwig set_buffer_delay(bh);
20534aa8cdd5SChristoph Hellwig return 0;
2054ae259a9cSChristoph Hellwig case IOMAP_UNWRITTEN:
2055ae259a9cSChristoph Hellwig /*
20563d7b6b21SAndreas Gruenbacher * For unwritten regions, we always need to ensure that regions
20573d7b6b21SAndreas Gruenbacher * in the block we are not writing to are zeroed. Mark the
20583d7b6b21SAndreas Gruenbacher * buffer as new to ensure this.
2059ae259a9cSChristoph Hellwig */
2060ae259a9cSChristoph Hellwig set_buffer_new(bh);
2061ae259a9cSChristoph Hellwig set_buffer_unwritten(bh);
2062df561f66SGustavo A. R. Silva fallthrough;
2063ae259a9cSChristoph Hellwig case IOMAP_MAPPED:
20643d7b6b21SAndreas Gruenbacher if ((iomap->flags & IOMAP_F_NEW) ||
2065381c0432SChristoph Hellwig offset >= i_size_read(inode)) {
2066381c0432SChristoph Hellwig /*
2067381c0432SChristoph Hellwig * This can happen if truncating the block device races
2068381c0432SChristoph Hellwig * with the check in the caller as i_size updates on
2069381c0432SChristoph Hellwig * block devices aren't synchronized by i_rwsem for
2070381c0432SChristoph Hellwig * block devices.
2071381c0432SChristoph Hellwig */
2072381c0432SChristoph Hellwig if (S_ISBLK(inode->i_mode))
2073381c0432SChristoph Hellwig return -EIO;
2074ae259a9cSChristoph Hellwig set_buffer_new(bh);
2075381c0432SChristoph Hellwig }
207619fe5f64SAndreas Gruenbacher bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
207719fe5f64SAndreas Gruenbacher inode->i_blkbits;
2078ae259a9cSChristoph Hellwig set_buffer_mapped(bh);
20794aa8cdd5SChristoph Hellwig return 0;
20804aa8cdd5SChristoph Hellwig default:
20814aa8cdd5SChristoph Hellwig WARN_ON_ONCE(1);
20824aa8cdd5SChristoph Hellwig return -EIO;
2083ae259a9cSChristoph Hellwig }
2084ae259a9cSChristoph Hellwig }
2085ae259a9cSChristoph Hellwig
__block_write_begin_int(struct folio * folio,loff_t pos,unsigned len,get_block_t * get_block,const struct iomap * iomap)2086d1bd0b4eSMatthew Wilcox (Oracle) int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
20876d49cc85SChristoph Hellwig get_block_t *get_block, const struct iomap *iomap)
20881da177e4SLinus Torvalds {
208909cbfeafSKirill A. Shutemov unsigned from = pos & (PAGE_SIZE - 1);
2090ebdec241SChristoph Hellwig unsigned to = from + len;
2091d1bd0b4eSMatthew Wilcox (Oracle) struct inode *inode = folio->mapping->host;
20921da177e4SLinus Torvalds unsigned block_start, block_end;
20931da177e4SLinus Torvalds sector_t block;
20941da177e4SLinus Torvalds int err = 0;
20951da177e4SLinus Torvalds unsigned blocksize, bbits;
20961da177e4SLinus Torvalds struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
20971da177e4SLinus Torvalds
2098d1bd0b4eSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio));
209909cbfeafSKirill A. Shutemov BUG_ON(from > PAGE_SIZE);
210009cbfeafSKirill A. Shutemov BUG_ON(to > PAGE_SIZE);
21011da177e4SLinus Torvalds BUG_ON(from > to);
21021da177e4SLinus Torvalds
2103c6c8c3e7SPankaj Raghav head = folio_create_buffers(folio, inode, 0);
210445bce8f3SLinus Torvalds blocksize = head->b_size;
210545bce8f3SLinus Torvalds bbits = block_size_bits(blocksize);
21061da177e4SLinus Torvalds
2107d1bd0b4eSMatthew Wilcox (Oracle) block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
21081da177e4SLinus Torvalds
21091da177e4SLinus Torvalds for(bh = head, block_start = 0; bh != head || !block_start;
21101da177e4SLinus Torvalds block++, block_start=block_end, bh = bh->b_this_page) {
21111da177e4SLinus Torvalds block_end = block_start + blocksize;
21121da177e4SLinus Torvalds if (block_end <= from || block_start >= to) {
2113d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) {
21141da177e4SLinus Torvalds if (!buffer_uptodate(bh))
21151da177e4SLinus Torvalds set_buffer_uptodate(bh);
21161da177e4SLinus Torvalds }
21171da177e4SLinus Torvalds continue;
21181da177e4SLinus Torvalds }
21191da177e4SLinus Torvalds if (buffer_new(bh))
21201da177e4SLinus Torvalds clear_buffer_new(bh);
21211da177e4SLinus Torvalds if (!buffer_mapped(bh)) {
2122b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize);
21234aa8cdd5SChristoph Hellwig if (get_block)
21241da177e4SLinus Torvalds err = get_block(inode, block, bh, 1);
21254aa8cdd5SChristoph Hellwig else
21264aa8cdd5SChristoph Hellwig err = iomap_to_bh(inode, block, bh, iomap);
21271da177e4SLinus Torvalds if (err)
2128f3ddbdc6SNick Piggin break;
2129ae259a9cSChristoph Hellwig
21301da177e4SLinus Torvalds if (buffer_new(bh)) {
2131e64855c6SJan Kara clean_bdev_bh_alias(bh);
2132d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) {
2133637aff46SNick Piggin clear_buffer_new(bh);
21341da177e4SLinus Torvalds set_buffer_uptodate(bh);
2135637aff46SNick Piggin mark_buffer_dirty(bh);
21361da177e4SLinus Torvalds continue;
21371da177e4SLinus Torvalds }
2138eebd2aa3SChristoph Lameter if (block_end > to || block_start < from)
2139d1bd0b4eSMatthew Wilcox (Oracle) folio_zero_segments(folio,
2140eebd2aa3SChristoph Lameter to, block_end,
2141eebd2aa3SChristoph Lameter block_start, from);
21421da177e4SLinus Torvalds continue;
21431da177e4SLinus Torvalds }
21441da177e4SLinus Torvalds }
2145d1bd0b4eSMatthew Wilcox (Oracle) if (folio_test_uptodate(folio)) {
21461da177e4SLinus Torvalds if (!buffer_uptodate(bh))
21471da177e4SLinus Torvalds set_buffer_uptodate(bh);
21481da177e4SLinus Torvalds continue;
21491da177e4SLinus Torvalds }
21501da177e4SLinus Torvalds if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
215133a266ddSDavid Chinner !buffer_unwritten(bh) &&
21521da177e4SLinus Torvalds (block_start < from || block_end > to)) {
2153e7ea1129SZhang Yi bh_read_nowait(bh, 0);
21541da177e4SLinus Torvalds *wait_bh++=bh;
21551da177e4SLinus Torvalds }
21561da177e4SLinus Torvalds }
21571da177e4SLinus Torvalds /*
21581da177e4SLinus Torvalds * If we issued read requests - let them complete.
21591da177e4SLinus Torvalds */
21601da177e4SLinus Torvalds while(wait_bh > wait) {
21611da177e4SLinus Torvalds wait_on_buffer(*--wait_bh);
21621da177e4SLinus Torvalds if (!buffer_uptodate(*wait_bh))
2163f3ddbdc6SNick Piggin err = -EIO;
21641da177e4SLinus Torvalds }
2165f9f07b6cSJan Kara if (unlikely(err))
21664a9622f2SMatthew Wilcox (Oracle) folio_zero_new_buffers(folio, from, to);
21671da177e4SLinus Torvalds return err;
21681da177e4SLinus Torvalds }
2169ae259a9cSChristoph Hellwig
__block_write_begin(struct page * page,loff_t pos,unsigned len,get_block_t * get_block)2170ae259a9cSChristoph Hellwig int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2171ae259a9cSChristoph Hellwig get_block_t *get_block)
2172ae259a9cSChristoph Hellwig {
2173d1bd0b4eSMatthew Wilcox (Oracle) return __block_write_begin_int(page_folio(page), pos, len, get_block,
2174d1bd0b4eSMatthew Wilcox (Oracle) NULL);
2175ae259a9cSChristoph Hellwig }
2176ebdec241SChristoph Hellwig EXPORT_SYMBOL(__block_write_begin);
21771da177e4SLinus Torvalds
__block_commit_write(struct folio * folio,size_t from,size_t to)2178a524fcfeSBean Huo static void __block_commit_write(struct folio *folio, size_t from, size_t to)
21791da177e4SLinus Torvalds {
21808c6cb3e3SMatthew Wilcox (Oracle) size_t block_start, block_end;
21818c6cb3e3SMatthew Wilcox (Oracle) bool partial = false;
21821da177e4SLinus Torvalds unsigned blocksize;
21831da177e4SLinus Torvalds struct buffer_head *bh, *head;
21841da177e4SLinus Torvalds
21858c6cb3e3SMatthew Wilcox (Oracle) bh = head = folio_buffers(folio);
21863f6bbe6eSWojciech GÅ‚adysz if (!bh)
21873f6bbe6eSWojciech GÅ‚adysz return;
218845bce8f3SLinus Torvalds blocksize = bh->b_size;
21891da177e4SLinus Torvalds
219045bce8f3SLinus Torvalds block_start = 0;
219145bce8f3SLinus Torvalds do {
21921da177e4SLinus Torvalds block_end = block_start + blocksize;
21931da177e4SLinus Torvalds if (block_end <= from || block_start >= to) {
21941da177e4SLinus Torvalds if (!buffer_uptodate(bh))
21958c6cb3e3SMatthew Wilcox (Oracle) partial = true;
21961da177e4SLinus Torvalds } else {
21971da177e4SLinus Torvalds set_buffer_uptodate(bh);
21981da177e4SLinus Torvalds mark_buffer_dirty(bh);
21991da177e4SLinus Torvalds }
22004ebd3aecSYang Guo if (buffer_new(bh))
2201afddba49SNick Piggin clear_buffer_new(bh);
220245bce8f3SLinus Torvalds
220345bce8f3SLinus Torvalds block_start = block_end;
220445bce8f3SLinus Torvalds bh = bh->b_this_page;
220545bce8f3SLinus Torvalds } while (bh != head);
22061da177e4SLinus Torvalds
22071da177e4SLinus Torvalds /*
22081da177e4SLinus Torvalds * If this is a partial write which happened to make all buffers
22092c69e205SMatthew Wilcox (Oracle) * uptodate then we can optimize away a bogus read_folio() for
22108c6cb3e3SMatthew Wilcox (Oracle) * the next read(). Here we 'discover' whether the folio went
22111da177e4SLinus Torvalds * uptodate as a result of this (potentially partial) write.
22121da177e4SLinus Torvalds */
22131da177e4SLinus Torvalds if (!partial)
22148c6cb3e3SMatthew Wilcox (Oracle) folio_mark_uptodate(folio);
22151da177e4SLinus Torvalds }
22161da177e4SLinus Torvalds
22171da177e4SLinus Torvalds /*
2218155130a4SChristoph Hellwig * block_write_begin takes care of the basic task of block allocation and
2219155130a4SChristoph Hellwig * bringing partial write blocks uptodate first.
2220155130a4SChristoph Hellwig *
22217bb46a67Snpiggin@suse.de * The filesystem needs to handle block truncation upon failure.
2222afddba49SNick Piggin */
block_write_begin(struct address_space * mapping,loff_t pos,unsigned len,struct page ** pagep,get_block_t * get_block)2223155130a4SChristoph Hellwig int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2224b3992d1eSMatthew Wilcox (Oracle) struct page **pagep, get_block_t *get_block)
2225afddba49SNick Piggin {
222609cbfeafSKirill A. Shutemov pgoff_t index = pos >> PAGE_SHIFT;
2227afddba49SNick Piggin struct page *page;
22286e1db88dSChristoph Hellwig int status;
2229afddba49SNick Piggin
2230b7446e7cSMatthew Wilcox (Oracle) page = grab_cache_page_write_begin(mapping, index);
22316e1db88dSChristoph Hellwig if (!page)
22326e1db88dSChristoph Hellwig return -ENOMEM;
2233afddba49SNick Piggin
22346e1db88dSChristoph Hellwig status = __block_write_begin(page, pos, len, get_block);
2235afddba49SNick Piggin if (unlikely(status)) {
2236afddba49SNick Piggin unlock_page(page);
223709cbfeafSKirill A. Shutemov put_page(page);
22386e1db88dSChristoph Hellwig page = NULL;
2239afddba49SNick Piggin }
2240afddba49SNick Piggin
22416e1db88dSChristoph Hellwig *pagep = page;
2242afddba49SNick Piggin return status;
2243afddba49SNick Piggin }
2244afddba49SNick Piggin EXPORT_SYMBOL(block_write_begin);
2245afddba49SNick Piggin
block_write_end(struct file * file,struct address_space * mapping,loff_t pos,unsigned len,unsigned copied,struct page * page,void * fsdata)2246afddba49SNick Piggin int block_write_end(struct file *file, struct address_space *mapping,
2247afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied,
2248afddba49SNick Piggin struct page *page, void *fsdata)
2249afddba49SNick Piggin {
22508c6cb3e3SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page);
22518c6cb3e3SMatthew Wilcox (Oracle) size_t start = pos - folio_pos(folio);
2252afddba49SNick Piggin
2253afddba49SNick Piggin if (unlikely(copied < len)) {
2254afddba49SNick Piggin /*
22552c69e205SMatthew Wilcox (Oracle) * The buffers that were written will now be uptodate, so
22562c69e205SMatthew Wilcox (Oracle) * we don't have to worry about a read_folio reading them
22572c69e205SMatthew Wilcox (Oracle) * and overwriting a partial write. However if we have
22582c69e205SMatthew Wilcox (Oracle) * encountered a short write and only partially written
22592c69e205SMatthew Wilcox (Oracle) * into a buffer, it will not be marked uptodate, so a
22602c69e205SMatthew Wilcox (Oracle) * read_folio might come in and destroy our partial write.
2261afddba49SNick Piggin *
2262afddba49SNick Piggin * Do the simplest thing, and just treat any short write to a
22638c6cb3e3SMatthew Wilcox (Oracle) * non uptodate folio as a zero-length write, and force the
2264afddba49SNick Piggin * caller to redo the whole thing.
2265afddba49SNick Piggin */
22668c6cb3e3SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio))
2267afddba49SNick Piggin copied = 0;
2268afddba49SNick Piggin
22694a9622f2SMatthew Wilcox (Oracle) folio_zero_new_buffers(folio, start+copied, start+len);
2270afddba49SNick Piggin }
22718c6cb3e3SMatthew Wilcox (Oracle) flush_dcache_folio(folio);
2272afddba49SNick Piggin
2273afddba49SNick Piggin /* This could be a short (even 0-length) commit */
2274489b7e72SBean Huo __block_commit_write(folio, start, start + copied);
2275afddba49SNick Piggin
2276afddba49SNick Piggin return copied;
2277afddba49SNick Piggin }
2278afddba49SNick Piggin EXPORT_SYMBOL(block_write_end);
2279afddba49SNick Piggin
generic_write_end(struct file * file,struct address_space * mapping,loff_t pos,unsigned len,unsigned copied,struct page * page,void * fsdata)2280afddba49SNick Piggin int generic_write_end(struct file *file, struct address_space *mapping,
2281afddba49SNick Piggin loff_t pos, unsigned len, unsigned copied,
2282afddba49SNick Piggin struct page *page, void *fsdata)
2283afddba49SNick Piggin {
22848af54f29SChristoph Hellwig struct inode *inode = mapping->host;
22858af54f29SChristoph Hellwig loff_t old_size = inode->i_size;
22868af54f29SChristoph Hellwig bool i_size_changed = false;
22878af54f29SChristoph Hellwig
2288afddba49SNick Piggin copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
22898af54f29SChristoph Hellwig
22908af54f29SChristoph Hellwig /*
22918af54f29SChristoph Hellwig * No need to use i_size_read() here, the i_size cannot change under us
22928af54f29SChristoph Hellwig * because we hold i_rwsem.
22938af54f29SChristoph Hellwig *
22948af54f29SChristoph Hellwig * But it's important to update i_size while still holding page lock:
22958af54f29SChristoph Hellwig * page writeout could otherwise come in and zero beyond i_size.
22968af54f29SChristoph Hellwig */
22978af54f29SChristoph Hellwig if (pos + copied > inode->i_size) {
22988af54f29SChristoph Hellwig i_size_write(inode, pos + copied);
22998af54f29SChristoph Hellwig i_size_changed = true;
23008af54f29SChristoph Hellwig }
23018af54f29SChristoph Hellwig
23028af54f29SChristoph Hellwig unlock_page(page);
23037a77dad7SAndreas Gruenbacher put_page(page);
23048af54f29SChristoph Hellwig
23058af54f29SChristoph Hellwig if (old_size < pos)
23068af54f29SChristoph Hellwig pagecache_isize_extended(inode, old_size, pos);
23078af54f29SChristoph Hellwig /*
23088af54f29SChristoph Hellwig * Don't mark the inode dirty under page lock. First, it unnecessarily
23098af54f29SChristoph Hellwig * makes the holding time of page lock longer. Second, it forces lock
23108af54f29SChristoph Hellwig * ordering of page lock and transaction start for journaling
23118af54f29SChristoph Hellwig * filesystems.
23128af54f29SChristoph Hellwig */
23138af54f29SChristoph Hellwig if (i_size_changed)
23148af54f29SChristoph Hellwig mark_inode_dirty(inode);
231526ddb1f4SAndreas Gruenbacher return copied;
2316afddba49SNick Piggin }
2317afddba49SNick Piggin EXPORT_SYMBOL(generic_write_end);
2318afddba49SNick Piggin
2319afddba49SNick Piggin /*
23202e7e80f7SMatthew Wilcox (Oracle) * block_is_partially_uptodate checks whether buffers within a folio are
23218ab22b9aSHisashi Hifumi * uptodate or not.
23228ab22b9aSHisashi Hifumi *
23232e7e80f7SMatthew Wilcox (Oracle) * Returns true if all buffers which correspond to the specified part
23242e7e80f7SMatthew Wilcox (Oracle) * of the folio are uptodate.
23258ab22b9aSHisashi Hifumi */
block_is_partially_uptodate(struct folio * folio,size_t from,size_t count)23262e7e80f7SMatthew Wilcox (Oracle) bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
23278ab22b9aSHisashi Hifumi {
23288ab22b9aSHisashi Hifumi unsigned block_start, block_end, blocksize;
23298ab22b9aSHisashi Hifumi unsigned to;
23308ab22b9aSHisashi Hifumi struct buffer_head *bh, *head;
23312e7e80f7SMatthew Wilcox (Oracle) bool ret = true;
23328ab22b9aSHisashi Hifumi
23332e7e80f7SMatthew Wilcox (Oracle) head = folio_buffers(folio);
23342e7e80f7SMatthew Wilcox (Oracle) if (!head)
23352e7e80f7SMatthew Wilcox (Oracle) return false;
233645bce8f3SLinus Torvalds blocksize = head->b_size;
23372e7e80f7SMatthew Wilcox (Oracle) to = min_t(unsigned, folio_size(folio) - from, count);
23388ab22b9aSHisashi Hifumi to = from + to;
23392e7e80f7SMatthew Wilcox (Oracle) if (from < blocksize && to > folio_size(folio) - blocksize)
23402e7e80f7SMatthew Wilcox (Oracle) return false;
23418ab22b9aSHisashi Hifumi
23428ab22b9aSHisashi Hifumi bh = head;
23438ab22b9aSHisashi Hifumi block_start = 0;
23448ab22b9aSHisashi Hifumi do {
23458ab22b9aSHisashi Hifumi block_end = block_start + blocksize;
23468ab22b9aSHisashi Hifumi if (block_end > from && block_start < to) {
23478ab22b9aSHisashi Hifumi if (!buffer_uptodate(bh)) {
23482e7e80f7SMatthew Wilcox (Oracle) ret = false;
23498ab22b9aSHisashi Hifumi break;
23508ab22b9aSHisashi Hifumi }
23518ab22b9aSHisashi Hifumi if (block_end >= to)
23528ab22b9aSHisashi Hifumi break;
23538ab22b9aSHisashi Hifumi }
23548ab22b9aSHisashi Hifumi block_start = block_end;
23558ab22b9aSHisashi Hifumi bh = bh->b_this_page;
23568ab22b9aSHisashi Hifumi } while (bh != head);
23578ab22b9aSHisashi Hifumi
23588ab22b9aSHisashi Hifumi return ret;
23598ab22b9aSHisashi Hifumi }
23608ab22b9aSHisashi Hifumi EXPORT_SYMBOL(block_is_partially_uptodate);
23618ab22b9aSHisashi Hifumi
23628ab22b9aSHisashi Hifumi /*
23632c69e205SMatthew Wilcox (Oracle) * Generic "read_folio" function for block devices that have the normal
23641da177e4SLinus Torvalds * get_block functionality. This is most of the block device filesystems.
23652c69e205SMatthew Wilcox (Oracle) * Reads the folio asynchronously --- the unlock_buffer() and
23661da177e4SLinus Torvalds * set/clear_buffer_uptodate() functions propagate buffer state into the
23672c69e205SMatthew Wilcox (Oracle) * folio once IO has completed.
23681da177e4SLinus Torvalds */
block_read_full_folio(struct folio * folio,get_block_t * get_block)23692c69e205SMatthew Wilcox (Oracle) int block_read_full_folio(struct folio *folio, get_block_t *get_block)
23701da177e4SLinus Torvalds {
23712c69e205SMatthew Wilcox (Oracle) struct inode *inode = folio->mapping->host;
23721da177e4SLinus Torvalds sector_t iblock, lblock;
23731da177e4SLinus Torvalds struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
237445bce8f3SLinus Torvalds unsigned int blocksize, bbits;
23751da177e4SLinus Torvalds int nr, i;
23761da177e4SLinus Torvalds int fully_mapped = 1;
2377b7a6eb22SMatthew Wilcox (Oracle) bool page_error = false;
23784fa512ceSEric Biggers loff_t limit = i_size_read(inode);
23794fa512ceSEric Biggers
23804fa512ceSEric Biggers /* This is needed for ext4. */
23814fa512ceSEric Biggers if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
23824fa512ceSEric Biggers limit = inode->i_sb->s_maxbytes;
23831da177e4SLinus Torvalds
23842c69e205SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
23852c69e205SMatthew Wilcox (Oracle)
2386c6c8c3e7SPankaj Raghav head = folio_create_buffers(folio, inode, 0);
238745bce8f3SLinus Torvalds blocksize = head->b_size;
238845bce8f3SLinus Torvalds bbits = block_size_bits(blocksize);
23891da177e4SLinus Torvalds
23902c69e205SMatthew Wilcox (Oracle) iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
23914fa512ceSEric Biggers lblock = (limit+blocksize-1) >> bbits;
23921da177e4SLinus Torvalds bh = head;
23931da177e4SLinus Torvalds nr = 0;
23941da177e4SLinus Torvalds i = 0;
23951da177e4SLinus Torvalds
23961da177e4SLinus Torvalds do {
23971da177e4SLinus Torvalds if (buffer_uptodate(bh))
23981da177e4SLinus Torvalds continue;
23991da177e4SLinus Torvalds
24001da177e4SLinus Torvalds if (!buffer_mapped(bh)) {
2401c64610baSAndrew Morton int err = 0;
2402c64610baSAndrew Morton
24031da177e4SLinus Torvalds fully_mapped = 0;
24041da177e4SLinus Torvalds if (iblock < lblock) {
2405b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize);
2406c64610baSAndrew Morton err = get_block(inode, iblock, bh, 0);
2407b7a6eb22SMatthew Wilcox (Oracle) if (err) {
24082c69e205SMatthew Wilcox (Oracle) folio_set_error(folio);
2409b7a6eb22SMatthew Wilcox (Oracle) page_error = true;
2410b7a6eb22SMatthew Wilcox (Oracle) }
24111da177e4SLinus Torvalds }
24121da177e4SLinus Torvalds if (!buffer_mapped(bh)) {
24132c69e205SMatthew Wilcox (Oracle) folio_zero_range(folio, i * blocksize,
24142c69e205SMatthew Wilcox (Oracle) blocksize);
2415c64610baSAndrew Morton if (!err)
24161da177e4SLinus Torvalds set_buffer_uptodate(bh);
24171da177e4SLinus Torvalds continue;
24181da177e4SLinus Torvalds }
24191da177e4SLinus Torvalds /*
24201da177e4SLinus Torvalds * get_block() might have updated the buffer
24211da177e4SLinus Torvalds * synchronously
24221da177e4SLinus Torvalds */
24231da177e4SLinus Torvalds if (buffer_uptodate(bh))
24241da177e4SLinus Torvalds continue;
24251da177e4SLinus Torvalds }
24261da177e4SLinus Torvalds arr[nr++] = bh;
24271da177e4SLinus Torvalds } while (i++, iblock++, (bh = bh->b_this_page) != head);
24281da177e4SLinus Torvalds
24291da177e4SLinus Torvalds if (fully_mapped)
24302c69e205SMatthew Wilcox (Oracle) folio_set_mappedtodisk(folio);
24311da177e4SLinus Torvalds
24321da177e4SLinus Torvalds if (!nr) {
24331da177e4SLinus Torvalds /*
24342c69e205SMatthew Wilcox (Oracle) * All buffers are uptodate - we can set the folio uptodate
24351da177e4SLinus Torvalds * as well. But not if get_block() returned an error.
24361da177e4SLinus Torvalds */
2437b7a6eb22SMatthew Wilcox (Oracle) if (!page_error)
24382c69e205SMatthew Wilcox (Oracle) folio_mark_uptodate(folio);
24392c69e205SMatthew Wilcox (Oracle) folio_unlock(folio);
24401da177e4SLinus Torvalds return 0;
24411da177e4SLinus Torvalds }
24421da177e4SLinus Torvalds
24431da177e4SLinus Torvalds /* Stage two: lock the buffers */
24441da177e4SLinus Torvalds for (i = 0; i < nr; i++) {
24451da177e4SLinus Torvalds bh = arr[i];
24461da177e4SLinus Torvalds lock_buffer(bh);
24471da177e4SLinus Torvalds mark_buffer_async_read(bh);
24481da177e4SLinus Torvalds }
24491da177e4SLinus Torvalds
24501da177e4SLinus Torvalds /*
24511da177e4SLinus Torvalds * Stage 3: start the IO. Check for uptodateness
24521da177e4SLinus Torvalds * inside the buffer lock in case another process reading
24531da177e4SLinus Torvalds * the underlying blockdev brought it uptodate (the sct fix).
24541da177e4SLinus Torvalds */
24551da177e4SLinus Torvalds for (i = 0; i < nr; i++) {
24561da177e4SLinus Torvalds bh = arr[i];
24571da177e4SLinus Torvalds if (buffer_uptodate(bh))
24581da177e4SLinus Torvalds end_buffer_async_read(bh, 1);
24591da177e4SLinus Torvalds else
24601420c4a5SBart Van Assche submit_bh(REQ_OP_READ, bh);
24611da177e4SLinus Torvalds }
24621da177e4SLinus Torvalds return 0;
24631da177e4SLinus Torvalds }
24642c69e205SMatthew Wilcox (Oracle) EXPORT_SYMBOL(block_read_full_folio);
24651da177e4SLinus Torvalds
24661da177e4SLinus Torvalds /* utility function for filesystems that need to do work on expanding
246789e10787SNick Piggin * truncates. Uses filesystem pagecache writes to allow the filesystem to
24681da177e4SLinus Torvalds * deal with the hole.
24691da177e4SLinus Torvalds */
generic_cont_expand_simple(struct inode * inode,loff_t size)247089e10787SNick Piggin int generic_cont_expand_simple(struct inode *inode, loff_t size)
24711da177e4SLinus Torvalds {
24721da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping;
247353b524b8SMatthew Wilcox (Oracle) const struct address_space_operations *aops = mapping->a_ops;
24741da177e4SLinus Torvalds struct page *page;
24751468c6f4SAlexander Potapenko void *fsdata = NULL;
24761da177e4SLinus Torvalds int err;
24771da177e4SLinus Torvalds
2478c08d3b0eSnpiggin@suse.de err = inode_newsize_ok(inode, size);
2479c08d3b0eSnpiggin@suse.de if (err)
24801da177e4SLinus Torvalds goto out;
24811da177e4SLinus Torvalds
248253b524b8SMatthew Wilcox (Oracle) err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
248389e10787SNick Piggin if (err)
248405eb0b51SOGAWA Hirofumi goto out;
248505eb0b51SOGAWA Hirofumi
248653b524b8SMatthew Wilcox (Oracle) err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
248789e10787SNick Piggin BUG_ON(err > 0);
248805eb0b51SOGAWA Hirofumi
248905eb0b51SOGAWA Hirofumi out:
249005eb0b51SOGAWA Hirofumi return err;
249105eb0b51SOGAWA Hirofumi }
24921fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_cont_expand_simple);
249305eb0b51SOGAWA Hirofumi
cont_expand_zero(struct file * file,struct address_space * mapping,loff_t pos,loff_t * bytes)2494f1e3af72SAdrian Bunk static int cont_expand_zero(struct file *file, struct address_space *mapping,
249589e10787SNick Piggin loff_t pos, loff_t *bytes)
249605eb0b51SOGAWA Hirofumi {
249789e10787SNick Piggin struct inode *inode = mapping->host;
249853b524b8SMatthew Wilcox (Oracle) const struct address_space_operations *aops = mapping->a_ops;
249993407472SFabian Frederick unsigned int blocksize = i_blocksize(inode);
250089e10787SNick Piggin struct page *page;
25011468c6f4SAlexander Potapenko void *fsdata = NULL;
250289e10787SNick Piggin pgoff_t index, curidx;
250389e10787SNick Piggin loff_t curpos;
250489e10787SNick Piggin unsigned zerofrom, offset, len;
250589e10787SNick Piggin int err = 0;
250605eb0b51SOGAWA Hirofumi
250709cbfeafSKirill A. Shutemov index = pos >> PAGE_SHIFT;
250809cbfeafSKirill A. Shutemov offset = pos & ~PAGE_MASK;
250989e10787SNick Piggin
251009cbfeafSKirill A. Shutemov while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
251109cbfeafSKirill A. Shutemov zerofrom = curpos & ~PAGE_MASK;
251289e10787SNick Piggin if (zerofrom & (blocksize-1)) {
251389e10787SNick Piggin *bytes |= (blocksize-1);
251489e10787SNick Piggin (*bytes)++;
251589e10787SNick Piggin }
251609cbfeafSKirill A. Shutemov len = PAGE_SIZE - zerofrom;
251789e10787SNick Piggin
251853b524b8SMatthew Wilcox (Oracle) err = aops->write_begin(file, mapping, curpos, len,
251989e10787SNick Piggin &page, &fsdata);
252089e10787SNick Piggin if (err)
252189e10787SNick Piggin goto out;
2522eebd2aa3SChristoph Lameter zero_user(page, zerofrom, len);
252353b524b8SMatthew Wilcox (Oracle) err = aops->write_end(file, mapping, curpos, len, len,
252489e10787SNick Piggin page, fsdata);
252589e10787SNick Piggin if (err < 0)
252689e10787SNick Piggin goto out;
252789e10787SNick Piggin BUG_ON(err != len);
252889e10787SNick Piggin err = 0;
2529061e9746SOGAWA Hirofumi
2530061e9746SOGAWA Hirofumi balance_dirty_pages_ratelimited(mapping);
2531c2ca0fcdSMikulas Patocka
253208d405c8SDavidlohr Bueso if (fatal_signal_pending(current)) {
2533c2ca0fcdSMikulas Patocka err = -EINTR;
2534c2ca0fcdSMikulas Patocka goto out;
2535c2ca0fcdSMikulas Patocka }
253689e10787SNick Piggin }
253789e10787SNick Piggin
253889e10787SNick Piggin /* page covers the boundary, find the boundary offset */
253989e10787SNick Piggin if (index == curidx) {
254009cbfeafSKirill A. Shutemov zerofrom = curpos & ~PAGE_MASK;
254189e10787SNick Piggin /* if we will expand the thing last block will be filled */
254289e10787SNick Piggin if (offset <= zerofrom) {
254389e10787SNick Piggin goto out;
254489e10787SNick Piggin }
254589e10787SNick Piggin if (zerofrom & (blocksize-1)) {
254689e10787SNick Piggin *bytes |= (blocksize-1);
254789e10787SNick Piggin (*bytes)++;
254889e10787SNick Piggin }
254989e10787SNick Piggin len = offset - zerofrom;
255089e10787SNick Piggin
255153b524b8SMatthew Wilcox (Oracle) err = aops->write_begin(file, mapping, curpos, len,
255289e10787SNick Piggin &page, &fsdata);
255389e10787SNick Piggin if (err)
255489e10787SNick Piggin goto out;
2555eebd2aa3SChristoph Lameter zero_user(page, zerofrom, len);
255653b524b8SMatthew Wilcox (Oracle) err = aops->write_end(file, mapping, curpos, len, len,
255789e10787SNick Piggin page, fsdata);
255889e10787SNick Piggin if (err < 0)
255989e10787SNick Piggin goto out;
256089e10787SNick Piggin BUG_ON(err != len);
256189e10787SNick Piggin err = 0;
256289e10787SNick Piggin }
256389e10787SNick Piggin out:
256489e10787SNick Piggin return err;
25651da177e4SLinus Torvalds }
25661da177e4SLinus Torvalds
25671da177e4SLinus Torvalds /*
25681da177e4SLinus Torvalds * For moronic filesystems that do not allow holes in file.
25691da177e4SLinus Torvalds * We may have to extend the file.
25701da177e4SLinus Torvalds */
cont_write_begin(struct file * file,struct address_space * mapping,loff_t pos,unsigned len,struct page ** pagep,void ** fsdata,get_block_t * get_block,loff_t * bytes)2571282dc178SChristoph Hellwig int cont_write_begin(struct file *file, struct address_space *mapping,
2572be3bbbc5SMatthew Wilcox (Oracle) loff_t pos, unsigned len,
257389e10787SNick Piggin struct page **pagep, void **fsdata,
257489e10787SNick Piggin get_block_t *get_block, loff_t *bytes)
25751da177e4SLinus Torvalds {
25761da177e4SLinus Torvalds struct inode *inode = mapping->host;
257793407472SFabian Frederick unsigned int blocksize = i_blocksize(inode);
257893407472SFabian Frederick unsigned int zerofrom;
257989e10787SNick Piggin int err;
25801da177e4SLinus Torvalds
258189e10787SNick Piggin err = cont_expand_zero(file, mapping, pos, bytes);
258289e10787SNick Piggin if (err)
2583155130a4SChristoph Hellwig return err;
25841da177e4SLinus Torvalds
258509cbfeafSKirill A. Shutemov zerofrom = *bytes & ~PAGE_MASK;
258689e10787SNick Piggin if (pos+len > *bytes && zerofrom & (blocksize-1)) {
25871da177e4SLinus Torvalds *bytes |= (blocksize-1);
25881da177e4SLinus Torvalds (*bytes)++;
25891da177e4SLinus Torvalds }
25901da177e4SLinus Torvalds
2591b3992d1eSMatthew Wilcox (Oracle) return block_write_begin(mapping, pos, len, pagep, get_block);
25921da177e4SLinus Torvalds }
25931fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(cont_write_begin);
25941da177e4SLinus Torvalds
block_commit_write(struct page * page,unsigned from,unsigned to)2595a524fcfeSBean Huo void block_commit_write(struct page *page, unsigned from, unsigned to)
25961da177e4SLinus Torvalds {
25978c6cb3e3SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page);
2598489b7e72SBean Huo __block_commit_write(folio, from, to);
25991da177e4SLinus Torvalds }
26001fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_commit_write);
26011da177e4SLinus Torvalds
260254171690SDavid Chinner /*
260354171690SDavid Chinner * block_page_mkwrite() is not allowed to change the file size as it gets
260454171690SDavid Chinner * called from a page fault handler when a page is first dirtied. Hence we must
260554171690SDavid Chinner * be careful to check for EOF conditions here. We set the page up correctly
260654171690SDavid Chinner * for a written page which means we get ENOSPC checking when writing into
260754171690SDavid Chinner * holes and correct delalloc and unwritten extent mapping on filesystems that
260854171690SDavid Chinner * support these features.
260954171690SDavid Chinner *
261054171690SDavid Chinner * We are not allowed to take the i_mutex here so we have to play games to
261154171690SDavid Chinner * protect against truncate races as the page could now be beyond EOF. Because
26127bb46a67Snpiggin@suse.de * truncate writes the inode size before removing pages, once we have the
261354171690SDavid Chinner * page lock we can determine safely if the page is beyond EOF. If it is not
261454171690SDavid Chinner * beyond EOF, then the page is guaranteed safe against truncation until we
261554171690SDavid Chinner * unlock the page.
2616ea13a864SJan Kara *
261714da9200SJan Kara * Direct callers of this function should protect against filesystem freezing
26185c500029SRoss Zwisler * using sb_start_pagefault() - sb_end_pagefault() functions.
261954171690SDavid Chinner */
block_page_mkwrite(struct vm_area_struct * vma,struct vm_fault * vmf,get_block_t get_block)26205c500029SRoss Zwisler int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
262154171690SDavid Chinner get_block_t get_block)
262254171690SDavid Chinner {
2623fe181377SMatthew Wilcox (Oracle) struct folio *folio = page_folio(vmf->page);
2624496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file);
262554171690SDavid Chinner unsigned long end;
262654171690SDavid Chinner loff_t size;
262724da4fabSJan Kara int ret;
262854171690SDavid Chinner
2629fe181377SMatthew Wilcox (Oracle) folio_lock(folio);
263054171690SDavid Chinner size = i_size_read(inode);
2631fe181377SMatthew Wilcox (Oracle) if ((folio->mapping != inode->i_mapping) ||
2632fe181377SMatthew Wilcox (Oracle) (folio_pos(folio) >= size)) {
263324da4fabSJan Kara /* We overload EFAULT to mean page got truncated */
263424da4fabSJan Kara ret = -EFAULT;
263524da4fabSJan Kara goto out_unlock;
263654171690SDavid Chinner }
263754171690SDavid Chinner
2638fe181377SMatthew Wilcox (Oracle) end = folio_size(folio);
2639fe181377SMatthew Wilcox (Oracle) /* folio is wholly or partially inside EOF */
2640fe181377SMatthew Wilcox (Oracle) if (folio_pos(folio) + end > size)
2641fe181377SMatthew Wilcox (Oracle) end = size - folio_pos(folio);
264254171690SDavid Chinner
2643fe181377SMatthew Wilcox (Oracle) ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2644a524fcfeSBean Huo if (unlikely(ret))
264524da4fabSJan Kara goto out_unlock;
2646a524fcfeSBean Huo
2647a524fcfeSBean Huo __block_commit_write(folio, 0, end);
2648a524fcfeSBean Huo
2649fe181377SMatthew Wilcox (Oracle) folio_mark_dirty(folio);
2650fe181377SMatthew Wilcox (Oracle) folio_wait_stable(folio);
265124da4fabSJan Kara return 0;
265224da4fabSJan Kara out_unlock:
2653fe181377SMatthew Wilcox (Oracle) folio_unlock(folio);
265454171690SDavid Chinner return ret;
265554171690SDavid Chinner }
26561fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_page_mkwrite);
26571da177e4SLinus Torvalds
block_truncate_page(struct address_space * mapping,loff_t from,get_block_t * get_block)26581da177e4SLinus Torvalds int block_truncate_page(struct address_space *mapping,
26591da177e4SLinus Torvalds loff_t from, get_block_t *get_block)
26601da177e4SLinus Torvalds {
266109cbfeafSKirill A. Shutemov pgoff_t index = from >> PAGE_SHIFT;
26621da177e4SLinus Torvalds unsigned blocksize;
266354b21a79SAndrew Morton sector_t iblock;
26646d68f644SMatthew Wilcox (Oracle) size_t offset, length, pos;
26651da177e4SLinus Torvalds struct inode *inode = mapping->host;
26666d68f644SMatthew Wilcox (Oracle) struct folio *folio;
26671da177e4SLinus Torvalds struct buffer_head *bh;
2668dc7cb2d2SJiapeng Chong int err = 0;
26691da177e4SLinus Torvalds
267093407472SFabian Frederick blocksize = i_blocksize(inode);
26716d68f644SMatthew Wilcox (Oracle) length = from & (blocksize - 1);
26721da177e4SLinus Torvalds
26731da177e4SLinus Torvalds /* Block boundary? Nothing to do */
26741da177e4SLinus Torvalds if (!length)
26751da177e4SLinus Torvalds return 0;
26761da177e4SLinus Torvalds
26771da177e4SLinus Torvalds length = blocksize - length;
267809cbfeafSKirill A. Shutemov iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
26791da177e4SLinus Torvalds
26806d68f644SMatthew Wilcox (Oracle) folio = filemap_grab_folio(mapping, index);
26816d68f644SMatthew Wilcox (Oracle) if (IS_ERR(folio))
26826d68f644SMatthew Wilcox (Oracle) return PTR_ERR(folio);
26831da177e4SLinus Torvalds
26846d68f644SMatthew Wilcox (Oracle) bh = folio_buffers(folio);
2685*724dc6daSMatthew Wilcox (Oracle) if (!bh)
2686*724dc6daSMatthew Wilcox (Oracle) bh = folio_create_empty_buffers(folio, blocksize, 0);
26871da177e4SLinus Torvalds
26881da177e4SLinus Torvalds /* Find the buffer that contains "offset" */
26896d68f644SMatthew Wilcox (Oracle) offset = offset_in_folio(folio, from);
26901da177e4SLinus Torvalds pos = blocksize;
26911da177e4SLinus Torvalds while (offset >= pos) {
26921da177e4SLinus Torvalds bh = bh->b_this_page;
26931da177e4SLinus Torvalds iblock++;
26941da177e4SLinus Torvalds pos += blocksize;
26951da177e4SLinus Torvalds }
26961da177e4SLinus Torvalds
26971da177e4SLinus Torvalds if (!buffer_mapped(bh)) {
2698b0cf2321SBadari Pulavarty WARN_ON(bh->b_size != blocksize);
26991da177e4SLinus Torvalds err = get_block(inode, iblock, bh, 0);
27001da177e4SLinus Torvalds if (err)
27011da177e4SLinus Torvalds goto unlock;
27021da177e4SLinus Torvalds /* unmapped? It's a hole - nothing to do */
27031da177e4SLinus Torvalds if (!buffer_mapped(bh))
27041da177e4SLinus Torvalds goto unlock;
27051da177e4SLinus Torvalds }
27061da177e4SLinus Torvalds
27071da177e4SLinus Torvalds /* Ok, it's mapped. Make sure it's up-to-date */
27086d68f644SMatthew Wilcox (Oracle) if (folio_test_uptodate(folio))
27091da177e4SLinus Torvalds set_buffer_uptodate(bh);
27101da177e4SLinus Torvalds
271133a266ddSDavid Chinner if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2712e7ea1129SZhang Yi err = bh_read(bh, 0);
27131da177e4SLinus Torvalds /* Uhhuh. Read error. Complain and punt. */
2714e7ea1129SZhang Yi if (err < 0)
27151da177e4SLinus Torvalds goto unlock;
27161da177e4SLinus Torvalds }
27171da177e4SLinus Torvalds
27186d68f644SMatthew Wilcox (Oracle) folio_zero_range(folio, offset, length);
27191da177e4SLinus Torvalds mark_buffer_dirty(bh);
27201da177e4SLinus Torvalds
27211da177e4SLinus Torvalds unlock:
27226d68f644SMatthew Wilcox (Oracle) folio_unlock(folio);
27236d68f644SMatthew Wilcox (Oracle) folio_put(folio);
2724dc7cb2d2SJiapeng Chong
27251da177e4SLinus Torvalds return err;
27261da177e4SLinus Torvalds }
27271fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_truncate_page);
27281da177e4SLinus Torvalds
27291da177e4SLinus Torvalds /*
27301da177e4SLinus Torvalds * The generic ->writepage function for buffer-backed address_spaces
27311da177e4SLinus Torvalds */
block_write_full_page(struct page * page,get_block_t * get_block,struct writeback_control * wbc)27321b938c08SMatthew Wilcox int block_write_full_page(struct page *page, get_block_t *get_block,
27331b938c08SMatthew Wilcox struct writeback_control *wbc)
27341da177e4SLinus Torvalds {
273553418a18SMatthew Wilcox (Oracle) struct folio *folio = page_folio(page);
2736bb0ea598SMatthew Wilcox (Oracle) struct inode * const inode = folio->mapping->host;
27371da177e4SLinus Torvalds loff_t i_size = i_size_read(inode);
27381da177e4SLinus Torvalds
2739bb0ea598SMatthew Wilcox (Oracle) /* Is the folio fully inside i_size? */
2740bb0ea598SMatthew Wilcox (Oracle) if (folio_pos(folio) + folio_size(folio) <= i_size)
274153418a18SMatthew Wilcox (Oracle) return __block_write_full_folio(inode, folio, get_block, wbc,
27421b938c08SMatthew Wilcox end_buffer_async_write);
27431da177e4SLinus Torvalds
2744bb0ea598SMatthew Wilcox (Oracle) /* Is the folio fully outside i_size? (truncate in progress) */
2745bb0ea598SMatthew Wilcox (Oracle) if (folio_pos(folio) >= i_size) {
274653418a18SMatthew Wilcox (Oracle) folio_unlock(folio);
27471da177e4SLinus Torvalds return 0; /* don't care */
27481da177e4SLinus Torvalds }
27491da177e4SLinus Torvalds
27501da177e4SLinus Torvalds /*
2751bb0ea598SMatthew Wilcox (Oracle) * The folio straddles i_size. It must be zeroed out on each and every
27522a61aa40SAdam Buchbinder * writepage invocation because it may be mmapped. "A file is mapped
27531da177e4SLinus Torvalds * in multiples of the page size. For a file that is not a multiple of
27541da177e4SLinus Torvalds * the page size, the remaining memory is zeroed when mapped, and
27551da177e4SLinus Torvalds * writes to that region are not written out to the file."
27561da177e4SLinus Torvalds */
2757bb0ea598SMatthew Wilcox (Oracle) folio_zero_segment(folio, offset_in_folio(folio, i_size),
2758bb0ea598SMatthew Wilcox (Oracle) folio_size(folio));
275953418a18SMatthew Wilcox (Oracle) return __block_write_full_folio(inode, folio, get_block, wbc,
276035c80d5fSChris Mason end_buffer_async_write);
276135c80d5fSChris Mason }
27621fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(block_write_full_page);
276335c80d5fSChris Mason
generic_block_bmap(struct address_space * mapping,sector_t block,get_block_t * get_block)27641da177e4SLinus Torvalds sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
27651da177e4SLinus Torvalds get_block_t *get_block)
27661da177e4SLinus Torvalds {
27671da177e4SLinus Torvalds struct inode *inode = mapping->host;
27682a527d68SAlexander Potapenko struct buffer_head tmp = {
27692a527d68SAlexander Potapenko .b_size = i_blocksize(inode),
27702a527d68SAlexander Potapenko };
27712a527d68SAlexander Potapenko
27721da177e4SLinus Torvalds get_block(inode, block, &tmp, 0);
27731da177e4SLinus Torvalds return tmp.b_blocknr;
27741da177e4SLinus Torvalds }
27751fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(generic_block_bmap);
27761da177e4SLinus Torvalds
end_bio_bh_io_sync(struct bio * bio)27774246a0b6SChristoph Hellwig static void end_bio_bh_io_sync(struct bio *bio)
27781da177e4SLinus Torvalds {
27791da177e4SLinus Torvalds struct buffer_head *bh = bio->bi_private;
27801da177e4SLinus Torvalds
2781b7c44ed9SJens Axboe if (unlikely(bio_flagged(bio, BIO_QUIET)))
278208bafc03SKeith Mannthey set_bit(BH_Quiet, &bh->b_state);
278308bafc03SKeith Mannthey
27844e4cbee9SChristoph Hellwig bh->b_end_io(bh, !bio->bi_status);
27851da177e4SLinus Torvalds bio_put(bio);
27861da177e4SLinus Torvalds }
27871da177e4SLinus Torvalds
submit_bh_wbc(blk_opf_t opf,struct buffer_head * bh,struct writeback_control * wbc)27885bdf402aSRitesh Harjani (IBM) static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
27891420c4a5SBart Van Assche struct writeback_control *wbc)
27901da177e4SLinus Torvalds {
27911420c4a5SBart Van Assche const enum req_op op = opf & REQ_OP_MASK;
27921da177e4SLinus Torvalds struct bio *bio;
27931da177e4SLinus Torvalds
27941da177e4SLinus Torvalds BUG_ON(!buffer_locked(bh));
27951da177e4SLinus Torvalds BUG_ON(!buffer_mapped(bh));
27961da177e4SLinus Torvalds BUG_ON(!bh->b_end_io);
27978fb0e342SAneesh Kumar K.V BUG_ON(buffer_delay(bh));
27988fb0e342SAneesh Kumar K.V BUG_ON(buffer_unwritten(bh));
27991da177e4SLinus Torvalds
280048fd4f93SJens Axboe /*
280148fd4f93SJens Axboe * Only clear out a write error when rewriting
28021da177e4SLinus Torvalds */
28032a222ca9SMike Christie if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
28041da177e4SLinus Torvalds clear_buffer_write_io_error(bh);
28051da177e4SLinus Torvalds
280607888c66SChristoph Hellwig if (buffer_meta(bh))
28071420c4a5SBart Van Assche opf |= REQ_META;
280807888c66SChristoph Hellwig if (buffer_prio(bh))
28091420c4a5SBart Van Assche opf |= REQ_PRIO;
281007888c66SChristoph Hellwig
28111420c4a5SBart Van Assche bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
28121da177e4SLinus Torvalds
28134f74d15fSEric Biggers fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
28144f74d15fSEric Biggers
28154f024f37SKent Overstreet bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
28161da177e4SLinus Torvalds
2817741af75dSJohannes Thumshirn __bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
28181da177e4SLinus Torvalds
28191da177e4SLinus Torvalds bio->bi_end_io = end_bio_bh_io_sync;
28201da177e4SLinus Torvalds bio->bi_private = bh;
28211da177e4SLinus Torvalds
282283c9c547SMing Lei /* Take care of bh's that straddle the end of the device */
282383c9c547SMing Lei guard_bio_eod(bio);
282483c9c547SMing Lei
2825fd42df30SDennis Zhou if (wbc) {
2826fd42df30SDennis Zhou wbc_init_bio(wbc, bio);
282734e51a5eSTejun Heo wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2828fd42df30SDennis Zhou }
2829fd42df30SDennis Zhou
28304e49ea4aSMike Christie submit_bio(bio);
28311da177e4SLinus Torvalds }
2832bafc0dbaSTejun Heo
submit_bh(blk_opf_t opf,struct buffer_head * bh)28335bdf402aSRitesh Harjani (IBM) void submit_bh(blk_opf_t opf, struct buffer_head *bh)
283471368511SDarrick J. Wong {
28355bdf402aSRitesh Harjani (IBM) submit_bh_wbc(opf, bh, NULL);
283671368511SDarrick J. Wong }
28371fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(submit_bh);
28381da177e4SLinus Torvalds
write_dirty_buffer(struct buffer_head * bh,blk_opf_t op_flags)28393ae72869SBart Van Assche void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
28409cb569d6SChristoph Hellwig {
28419cb569d6SChristoph Hellwig lock_buffer(bh);
28429cb569d6SChristoph Hellwig if (!test_clear_buffer_dirty(bh)) {
28439cb569d6SChristoph Hellwig unlock_buffer(bh);
28449cb569d6SChristoph Hellwig return;
28459cb569d6SChristoph Hellwig }
28469cb569d6SChristoph Hellwig bh->b_end_io = end_buffer_write_sync;
28479cb569d6SChristoph Hellwig get_bh(bh);
28481420c4a5SBart Van Assche submit_bh(REQ_OP_WRITE | op_flags, bh);
28499cb569d6SChristoph Hellwig }
28509cb569d6SChristoph Hellwig EXPORT_SYMBOL(write_dirty_buffer);
28519cb569d6SChristoph Hellwig
28521da177e4SLinus Torvalds /*
28531da177e4SLinus Torvalds * For a data-integrity writeout, we need to wait upon any in-progress I/O
28541da177e4SLinus Torvalds * and then start new I/O and then wait upon it. The caller must have a ref on
28551da177e4SLinus Torvalds * the buffer_head.
28561da177e4SLinus Torvalds */
__sync_dirty_buffer(struct buffer_head * bh,blk_opf_t op_flags)28573ae72869SBart Van Assche int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
28581da177e4SLinus Torvalds {
28591da177e4SLinus Torvalds WARN_ON(atomic_read(&bh->b_count) < 1);
28601da177e4SLinus Torvalds lock_buffer(bh);
28611da177e4SLinus Torvalds if (test_clear_buffer_dirty(bh)) {
2862377254b2SXianting Tian /*
2863377254b2SXianting Tian * The bh should be mapped, but it might not be if the
2864377254b2SXianting Tian * device was hot-removed. Not much we can do but fail the I/O.
2865377254b2SXianting Tian */
2866377254b2SXianting Tian if (!buffer_mapped(bh)) {
2867377254b2SXianting Tian unlock_buffer(bh);
2868377254b2SXianting Tian return -EIO;
2869377254b2SXianting Tian }
2870377254b2SXianting Tian
28711da177e4SLinus Torvalds get_bh(bh);
28721da177e4SLinus Torvalds bh->b_end_io = end_buffer_write_sync;
2873ab620620SRitesh Harjani (IBM) submit_bh(REQ_OP_WRITE | op_flags, bh);
28741da177e4SLinus Torvalds wait_on_buffer(bh);
2875ab620620SRitesh Harjani (IBM) if (!buffer_uptodate(bh))
2876ab620620SRitesh Harjani (IBM) return -EIO;
28771da177e4SLinus Torvalds } else {
28781da177e4SLinus Torvalds unlock_buffer(bh);
28791da177e4SLinus Torvalds }
2880ab620620SRitesh Harjani (IBM) return 0;
28811da177e4SLinus Torvalds }
288287e99511SChristoph Hellwig EXPORT_SYMBOL(__sync_dirty_buffer);
288387e99511SChristoph Hellwig
sync_dirty_buffer(struct buffer_head * bh)288487e99511SChristoph Hellwig int sync_dirty_buffer(struct buffer_head *bh)
288587e99511SChristoph Hellwig {
288670fd7614SChristoph Hellwig return __sync_dirty_buffer(bh, REQ_SYNC);
288787e99511SChristoph Hellwig }
28881fe72eaaSH Hartley Sweeten EXPORT_SYMBOL(sync_dirty_buffer);
28891da177e4SLinus Torvalds
28901da177e4SLinus Torvalds /*
289168189fefSMatthew Wilcox (Oracle) * try_to_free_buffers() checks if all the buffers on this particular folio
28921da177e4SLinus Torvalds * are unused, and releases them if so.
28931da177e4SLinus Torvalds *
28941da177e4SLinus Torvalds * Exclusion against try_to_free_buffers may be obtained by either
289568189fefSMatthew Wilcox (Oracle) * locking the folio or by holding its mapping's private_lock.
28961da177e4SLinus Torvalds *
289768189fefSMatthew Wilcox (Oracle) * If the folio is dirty but all the buffers are clean then we need to
289868189fefSMatthew Wilcox (Oracle) * be sure to mark the folio clean as well. This is because the folio
28991da177e4SLinus Torvalds * may be against a block device, and a later reattachment of buffers
290068189fefSMatthew Wilcox (Oracle) * to a dirty folio will set *all* buffers dirty. Which would corrupt
29011da177e4SLinus Torvalds * filesystem data on the same device.
29021da177e4SLinus Torvalds *
290368189fefSMatthew Wilcox (Oracle) * The same applies to regular filesystem folios: if all the buffers are
290468189fefSMatthew Wilcox (Oracle) * clean then we set the folio clean and proceed. To do that, we require
2905e621900aSMatthew Wilcox (Oracle) * total exclusion from block_dirty_folio(). That is obtained with
29061da177e4SLinus Torvalds * private_lock.
29071da177e4SLinus Torvalds *
29081da177e4SLinus Torvalds * try_to_free_buffers() is non-blocking.
29091da177e4SLinus Torvalds */
buffer_busy(struct buffer_head * bh)29101da177e4SLinus Torvalds static inline int buffer_busy(struct buffer_head *bh)
29111da177e4SLinus Torvalds {
29121da177e4SLinus Torvalds return atomic_read(&bh->b_count) |
29131da177e4SLinus Torvalds (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
29141da177e4SLinus Torvalds }
29151da177e4SLinus Torvalds
291664394763SMatthew Wilcox (Oracle) static bool
drop_buffers(struct folio * folio,struct buffer_head ** buffers_to_free)291764394763SMatthew Wilcox (Oracle) drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
29181da177e4SLinus Torvalds {
291964394763SMatthew Wilcox (Oracle) struct buffer_head *head = folio_buffers(folio);
29201da177e4SLinus Torvalds struct buffer_head *bh;
29211da177e4SLinus Torvalds
29221da177e4SLinus Torvalds bh = head;
29231da177e4SLinus Torvalds do {
29241da177e4SLinus Torvalds if (buffer_busy(bh))
29251da177e4SLinus Torvalds goto failed;
29261da177e4SLinus Torvalds bh = bh->b_this_page;
29271da177e4SLinus Torvalds } while (bh != head);
29281da177e4SLinus Torvalds
29291da177e4SLinus Torvalds do {
29301da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page;
29311da177e4SLinus Torvalds
2932535ee2fbSJan Kara if (bh->b_assoc_map)
29331da177e4SLinus Torvalds __remove_assoc_queue(bh);
29341da177e4SLinus Torvalds bh = next;
29351da177e4SLinus Torvalds } while (bh != head);
29361da177e4SLinus Torvalds *buffers_to_free = head;
293764394763SMatthew Wilcox (Oracle) folio_detach_private(folio);
293864394763SMatthew Wilcox (Oracle) return true;
29391da177e4SLinus Torvalds failed:
294064394763SMatthew Wilcox (Oracle) return false;
29411da177e4SLinus Torvalds }
29421da177e4SLinus Torvalds
try_to_free_buffers(struct folio * folio)294368189fefSMatthew Wilcox (Oracle) bool try_to_free_buffers(struct folio *folio)
29441da177e4SLinus Torvalds {
294568189fefSMatthew Wilcox (Oracle) struct address_space * const mapping = folio->mapping;
29461da177e4SLinus Torvalds struct buffer_head *buffers_to_free = NULL;
294768189fefSMatthew Wilcox (Oracle) bool ret = 0;
29481da177e4SLinus Torvalds
294968189fefSMatthew Wilcox (Oracle) BUG_ON(!folio_test_locked(folio));
295068189fefSMatthew Wilcox (Oracle) if (folio_test_writeback(folio))
295168189fefSMatthew Wilcox (Oracle) return false;
29521da177e4SLinus Torvalds
29531da177e4SLinus Torvalds if (mapping == NULL) { /* can this still happen? */
295464394763SMatthew Wilcox (Oracle) ret = drop_buffers(folio, &buffers_to_free);
29551da177e4SLinus Torvalds goto out;
29561da177e4SLinus Torvalds }
29571da177e4SLinus Torvalds
29581da177e4SLinus Torvalds spin_lock(&mapping->private_lock);
295964394763SMatthew Wilcox (Oracle) ret = drop_buffers(folio, &buffers_to_free);
2960ecdfc978SLinus Torvalds
2961ecdfc978SLinus Torvalds /*
2962ecdfc978SLinus Torvalds * If the filesystem writes its buffers by hand (eg ext3)
296368189fefSMatthew Wilcox (Oracle) * then we can have clean buffers against a dirty folio. We
296468189fefSMatthew Wilcox (Oracle) * clean the folio here; otherwise the VM will never notice
2965ecdfc978SLinus Torvalds * that the filesystem did any IO at all.
2966ecdfc978SLinus Torvalds *
2967ecdfc978SLinus Torvalds * Also, during truncate, discard_buffer will have marked all
296868189fefSMatthew Wilcox (Oracle) * the folio's buffers clean. We discover that here and clean
296968189fefSMatthew Wilcox (Oracle) * the folio also.
297087df7241SNick Piggin *
297187df7241SNick Piggin * private_lock must be held over this entire operation in order
2972e621900aSMatthew Wilcox (Oracle) * to synchronise against block_dirty_folio and prevent the
297387df7241SNick Piggin * dirty bit from being lost.
2974ecdfc978SLinus Torvalds */
297511f81becSTejun Heo if (ret)
297668189fefSMatthew Wilcox (Oracle) folio_cancel_dirty(folio);
297787df7241SNick Piggin spin_unlock(&mapping->private_lock);
29781da177e4SLinus Torvalds out:
29791da177e4SLinus Torvalds if (buffers_to_free) {
29801da177e4SLinus Torvalds struct buffer_head *bh = buffers_to_free;
29811da177e4SLinus Torvalds
29821da177e4SLinus Torvalds do {
29831da177e4SLinus Torvalds struct buffer_head *next = bh->b_this_page;
29841da177e4SLinus Torvalds free_buffer_head(bh);
29851da177e4SLinus Torvalds bh = next;
29861da177e4SLinus Torvalds } while (bh != buffers_to_free);
29871da177e4SLinus Torvalds }
29881da177e4SLinus Torvalds return ret;
29891da177e4SLinus Torvalds }
29901da177e4SLinus Torvalds EXPORT_SYMBOL(try_to_free_buffers);
29911da177e4SLinus Torvalds
29921da177e4SLinus Torvalds /*
29931da177e4SLinus Torvalds * Buffer-head allocation
29941da177e4SLinus Torvalds */
2995a0a9b043SShai Fultheim static struct kmem_cache *bh_cachep __read_mostly;
29961da177e4SLinus Torvalds
29971da177e4SLinus Torvalds /*
29981da177e4SLinus Torvalds * Once the number of bh's in the machine exceeds this level, we start
29991da177e4SLinus Torvalds * stripping them in writeback.
30001da177e4SLinus Torvalds */
300143be594aSZhang Yanfei static unsigned long max_buffer_heads;
30021da177e4SLinus Torvalds
30031da177e4SLinus Torvalds int buffer_heads_over_limit;
30041da177e4SLinus Torvalds
30051da177e4SLinus Torvalds struct bh_accounting {
30061da177e4SLinus Torvalds int nr; /* Number of live bh's */
30071da177e4SLinus Torvalds int ratelimit; /* Limit cacheline bouncing */
30081da177e4SLinus Torvalds };
30091da177e4SLinus Torvalds
30101da177e4SLinus Torvalds static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
30111da177e4SLinus Torvalds
recalc_bh_state(void)30121da177e4SLinus Torvalds static void recalc_bh_state(void)
30131da177e4SLinus Torvalds {
30141da177e4SLinus Torvalds int i;
30151da177e4SLinus Torvalds int tot = 0;
30161da177e4SLinus Torvalds
3017ee1be862SChristoph Lameter if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
30181da177e4SLinus Torvalds return;
3019c7b92516SChristoph Lameter __this_cpu_write(bh_accounting.ratelimit, 0);
30208a143426SEric Dumazet for_each_online_cpu(i)
30211da177e4SLinus Torvalds tot += per_cpu(bh_accounting, i).nr;
30221da177e4SLinus Torvalds buffer_heads_over_limit = (tot > max_buffer_heads);
30231da177e4SLinus Torvalds }
30241da177e4SLinus Torvalds
alloc_buffer_head(gfp_t gfp_flags)3025dd0fc66fSAl Viro struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
30261da177e4SLinus Torvalds {
3027019b4d12SRichard Kennedy struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
30281da177e4SLinus Torvalds if (ret) {
3029a35afb83SChristoph Lameter INIT_LIST_HEAD(&ret->b_assoc_buffers);
3030f1e67e35SThomas Gleixner spin_lock_init(&ret->b_uptodate_lock);
3031c7b92516SChristoph Lameter preempt_disable();
3032c7b92516SChristoph Lameter __this_cpu_inc(bh_accounting.nr);
30331da177e4SLinus Torvalds recalc_bh_state();
3034c7b92516SChristoph Lameter preempt_enable();
30351da177e4SLinus Torvalds }
30361da177e4SLinus Torvalds return ret;
30371da177e4SLinus Torvalds }
30381da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_buffer_head);
30391da177e4SLinus Torvalds
free_buffer_head(struct buffer_head * bh)30401da177e4SLinus Torvalds void free_buffer_head(struct buffer_head *bh)
30411da177e4SLinus Torvalds {
30421da177e4SLinus Torvalds BUG_ON(!list_empty(&bh->b_assoc_buffers));
30431da177e4SLinus Torvalds kmem_cache_free(bh_cachep, bh);
3044c7b92516SChristoph Lameter preempt_disable();
3045c7b92516SChristoph Lameter __this_cpu_dec(bh_accounting.nr);
30461da177e4SLinus Torvalds recalc_bh_state();
3047c7b92516SChristoph Lameter preempt_enable();
30481da177e4SLinus Torvalds }
30491da177e4SLinus Torvalds EXPORT_SYMBOL(free_buffer_head);
30501da177e4SLinus Torvalds
buffer_exit_cpu_dead(unsigned int cpu)3051fc4d24c9SSebastian Andrzej Siewior static int buffer_exit_cpu_dead(unsigned int cpu)
30521da177e4SLinus Torvalds {
30531da177e4SLinus Torvalds int i;
30541da177e4SLinus Torvalds struct bh_lru *b = &per_cpu(bh_lrus, cpu);
30551da177e4SLinus Torvalds
30561da177e4SLinus Torvalds for (i = 0; i < BH_LRU_SIZE; i++) {
30571da177e4SLinus Torvalds brelse(b->bhs[i]);
30581da177e4SLinus Torvalds b->bhs[i] = NULL;
30591da177e4SLinus Torvalds }
3060c7b92516SChristoph Lameter this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
30618a143426SEric Dumazet per_cpu(bh_accounting, cpu).nr = 0;
3062fc4d24c9SSebastian Andrzej Siewior return 0;
30631da177e4SLinus Torvalds }
30641da177e4SLinus Torvalds
3065389d1b08SAneesh Kumar K.V /**
3066a6b91919SRandy Dunlap * bh_uptodate_or_lock - Test whether the buffer is uptodate
3067389d1b08SAneesh Kumar K.V * @bh: struct buffer_head
3068389d1b08SAneesh Kumar K.V *
3069389d1b08SAneesh Kumar K.V * Return true if the buffer is up-to-date and false,
3070389d1b08SAneesh Kumar K.V * with the buffer locked, if not.
3071389d1b08SAneesh Kumar K.V */
bh_uptodate_or_lock(struct buffer_head * bh)3072389d1b08SAneesh Kumar K.V int bh_uptodate_or_lock(struct buffer_head *bh)
3073389d1b08SAneesh Kumar K.V {
3074389d1b08SAneesh Kumar K.V if (!buffer_uptodate(bh)) {
3075389d1b08SAneesh Kumar K.V lock_buffer(bh);
3076389d1b08SAneesh Kumar K.V if (!buffer_uptodate(bh))
3077389d1b08SAneesh Kumar K.V return 0;
3078389d1b08SAneesh Kumar K.V unlock_buffer(bh);
3079389d1b08SAneesh Kumar K.V }
3080389d1b08SAneesh Kumar K.V return 1;
3081389d1b08SAneesh Kumar K.V }
3082389d1b08SAneesh Kumar K.V EXPORT_SYMBOL(bh_uptodate_or_lock);
3083389d1b08SAneesh Kumar K.V
3084389d1b08SAneesh Kumar K.V /**
3085fdee117eSZhang Yi * __bh_read - Submit read for a locked buffer
3086389d1b08SAneesh Kumar K.V * @bh: struct buffer_head
3087fdee117eSZhang Yi * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3088fdee117eSZhang Yi * @wait: wait until reading finish
3089389d1b08SAneesh Kumar K.V *
3090fdee117eSZhang Yi * Returns zero on success or don't wait, and -EIO on error.
3091389d1b08SAneesh Kumar K.V */
__bh_read(struct buffer_head * bh,blk_opf_t op_flags,bool wait)3092fdee117eSZhang Yi int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3093389d1b08SAneesh Kumar K.V {
3094fdee117eSZhang Yi int ret = 0;
3095389d1b08SAneesh Kumar K.V
3096fdee117eSZhang Yi BUG_ON(!buffer_locked(bh));
3097389d1b08SAneesh Kumar K.V
3098389d1b08SAneesh Kumar K.V get_bh(bh);
3099389d1b08SAneesh Kumar K.V bh->b_end_io = end_buffer_read_sync;
3100fdee117eSZhang Yi submit_bh(REQ_OP_READ | op_flags, bh);
3101fdee117eSZhang Yi if (wait) {
3102389d1b08SAneesh Kumar K.V wait_on_buffer(bh);
3103fdee117eSZhang Yi if (!buffer_uptodate(bh))
3104fdee117eSZhang Yi ret = -EIO;
3105389d1b08SAneesh Kumar K.V }
3106fdee117eSZhang Yi return ret;
3107fdee117eSZhang Yi }
3108fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read);
3109fdee117eSZhang Yi
3110fdee117eSZhang Yi /**
3111fdee117eSZhang Yi * __bh_read_batch - Submit read for a batch of unlocked buffers
3112fdee117eSZhang Yi * @nr: entry number of the buffer batch
3113fdee117eSZhang Yi * @bhs: a batch of struct buffer_head
3114fdee117eSZhang Yi * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3115fdee117eSZhang Yi * @force_lock: force to get a lock on the buffer if set, otherwise drops any
3116fdee117eSZhang Yi * buffer that cannot lock.
3117fdee117eSZhang Yi *
3118fdee117eSZhang Yi * Returns zero on success or don't wait, and -EIO on error.
3119fdee117eSZhang Yi */
__bh_read_batch(int nr,struct buffer_head * bhs[],blk_opf_t op_flags,bool force_lock)3120fdee117eSZhang Yi void __bh_read_batch(int nr, struct buffer_head *bhs[],
3121fdee117eSZhang Yi blk_opf_t op_flags, bool force_lock)
3122fdee117eSZhang Yi {
3123fdee117eSZhang Yi int i;
3124fdee117eSZhang Yi
3125fdee117eSZhang Yi for (i = 0; i < nr; i++) {
3126fdee117eSZhang Yi struct buffer_head *bh = bhs[i];
3127fdee117eSZhang Yi
3128fdee117eSZhang Yi if (buffer_uptodate(bh))
3129fdee117eSZhang Yi continue;
3130fdee117eSZhang Yi
3131fdee117eSZhang Yi if (force_lock)
3132fdee117eSZhang Yi lock_buffer(bh);
3133fdee117eSZhang Yi else
3134fdee117eSZhang Yi if (!trylock_buffer(bh))
3135fdee117eSZhang Yi continue;
3136fdee117eSZhang Yi
3137fdee117eSZhang Yi if (buffer_uptodate(bh)) {
3138fdee117eSZhang Yi unlock_buffer(bh);
3139fdee117eSZhang Yi continue;
3140fdee117eSZhang Yi }
3141fdee117eSZhang Yi
3142fdee117eSZhang Yi bh->b_end_io = end_buffer_read_sync;
3143fdee117eSZhang Yi get_bh(bh);
3144fdee117eSZhang Yi submit_bh(REQ_OP_READ | op_flags, bh);
3145fdee117eSZhang Yi }
3146fdee117eSZhang Yi }
3147fdee117eSZhang Yi EXPORT_SYMBOL(__bh_read_batch);
3148389d1b08SAneesh Kumar K.V
buffer_init(void)31491da177e4SLinus Torvalds void __init buffer_init(void)
31501da177e4SLinus Torvalds {
315143be594aSZhang Yanfei unsigned long nrpages;
3152fc4d24c9SSebastian Andrzej Siewior int ret;
31531da177e4SLinus Torvalds
3154b98938c3SChristoph Lameter bh_cachep = kmem_cache_create("buffer_head",
3155b98938c3SChristoph Lameter sizeof(struct buffer_head), 0,
3156b98938c3SChristoph Lameter (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3157b98938c3SChristoph Lameter SLAB_MEM_SPREAD),
3158019b4d12SRichard Kennedy NULL);
31591da177e4SLinus Torvalds
31601da177e4SLinus Torvalds /*
31611da177e4SLinus Torvalds * Limit the bh occupancy to 10% of ZONE_NORMAL
31621da177e4SLinus Torvalds */
31631da177e4SLinus Torvalds nrpages = (nr_free_buffer_pages() * 10) / 100;
31641da177e4SLinus Torvalds max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3165fc4d24c9SSebastian Andrzej Siewior ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3166fc4d24c9SSebastian Andrzej Siewior NULL, buffer_exit_cpu_dead);
3167fc4d24c9SSebastian Andrzej Siewior WARN_ON(ret < 0);
31681da177e4SLinus Torvalds }
3169