xref: /openbmc/linux/fs/buffer.c (revision 26b32974)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/fs/buffer.c
4  *
5  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
6  */
7 
8 /*
9  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10  *
11  * Removed a lot of unnecessary code and simplified things now that
12  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13  *
14  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
15  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
16  *
17  * Added 32k buffer block sizes - these are required older ARM systems. - RMK
18  *
19  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
20  */
21 
22 #include <linux/kernel.h>
23 #include <linux/sched/signal.h>
24 #include <linux/syscalls.h>
25 #include <linux/fs.h>
26 #include <linux/iomap.h>
27 #include <linux/mm.h>
28 #include <linux/percpu.h>
29 #include <linux/slab.h>
30 #include <linux/capability.h>
31 #include <linux/blkdev.h>
32 #include <linux/file.h>
33 #include <linux/quotaops.h>
34 #include <linux/highmem.h>
35 #include <linux/export.h>
36 #include <linux/backing-dev.h>
37 #include <linux/writeback.h>
38 #include <linux/hash.h>
39 #include <linux/suspend.h>
40 #include <linux/buffer_head.h>
41 #include <linux/task_io_accounting_ops.h>
42 #include <linux/bio.h>
43 #include <linux/cpu.h>
44 #include <linux/bitops.h>
45 #include <linux/mpage.h>
46 #include <linux/bit_spinlock.h>
47 #include <linux/pagevec.h>
48 #include <linux/sched/mm.h>
49 #include <trace/events/block.h>
50 #include <linux/fscrypt.h>
51 #include <linux/fsverity.h>
52 
53 #include "internal.h"
54 
55 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
56 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
57 			  struct writeback_control *wbc);
58 
59 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
60 
61 inline void touch_buffer(struct buffer_head *bh)
62 {
63 	trace_block_touch_buffer(bh);
64 	folio_mark_accessed(bh->b_folio);
65 }
66 EXPORT_SYMBOL(touch_buffer);
67 
68 void __lock_buffer(struct buffer_head *bh)
69 {
70 	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
71 }
72 EXPORT_SYMBOL(__lock_buffer);
73 
74 void unlock_buffer(struct buffer_head *bh)
75 {
76 	clear_bit_unlock(BH_Lock, &bh->b_state);
77 	smp_mb__after_atomic();
78 	wake_up_bit(&bh->b_state, BH_Lock);
79 }
80 EXPORT_SYMBOL(unlock_buffer);
81 
82 /*
83  * Returns if the folio has dirty or writeback buffers. If all the buffers
84  * are unlocked and clean then the folio_test_dirty information is stale. If
85  * any of the buffers are locked, it is assumed they are locked for IO.
86  */
87 void buffer_check_dirty_writeback(struct folio *folio,
88 				     bool *dirty, bool *writeback)
89 {
90 	struct buffer_head *head, *bh;
91 	*dirty = false;
92 	*writeback = false;
93 
94 	BUG_ON(!folio_test_locked(folio));
95 
96 	head = folio_buffers(folio);
97 	if (!head)
98 		return;
99 
100 	if (folio_test_writeback(folio))
101 		*writeback = true;
102 
103 	bh = head;
104 	do {
105 		if (buffer_locked(bh))
106 			*writeback = true;
107 
108 		if (buffer_dirty(bh))
109 			*dirty = true;
110 
111 		bh = bh->b_this_page;
112 	} while (bh != head);
113 }
114 
115 /*
116  * Block until a buffer comes unlocked.  This doesn't stop it
117  * from becoming locked again - you have to lock it yourself
118  * if you want to preserve its state.
119  */
120 void __wait_on_buffer(struct buffer_head * bh)
121 {
122 	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
123 }
124 EXPORT_SYMBOL(__wait_on_buffer);
125 
126 static void buffer_io_error(struct buffer_head *bh, char *msg)
127 {
128 	if (!test_bit(BH_Quiet, &bh->b_state))
129 		printk_ratelimited(KERN_ERR
130 			"Buffer I/O error on dev %pg, logical block %llu%s\n",
131 			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
132 }
133 
134 /*
135  * End-of-IO handler helper function which does not touch the bh after
136  * unlocking it.
137  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
138  * a race there is benign: unlock_buffer() only use the bh's address for
139  * hashing after unlocking the buffer, so it doesn't actually touch the bh
140  * itself.
141  */
142 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
143 {
144 	if (uptodate) {
145 		set_buffer_uptodate(bh);
146 	} else {
147 		/* This happens, due to failed read-ahead attempts. */
148 		clear_buffer_uptodate(bh);
149 	}
150 	unlock_buffer(bh);
151 }
152 
153 /*
154  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
155  * unlock the buffer.
156  */
157 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
158 {
159 	__end_buffer_read_notouch(bh, uptodate);
160 	put_bh(bh);
161 }
162 EXPORT_SYMBOL(end_buffer_read_sync);
163 
164 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
165 {
166 	if (uptodate) {
167 		set_buffer_uptodate(bh);
168 	} else {
169 		buffer_io_error(bh, ", lost sync page write");
170 		mark_buffer_write_io_error(bh);
171 		clear_buffer_uptodate(bh);
172 	}
173 	unlock_buffer(bh);
174 	put_bh(bh);
175 }
176 EXPORT_SYMBOL(end_buffer_write_sync);
177 
178 /*
179  * Various filesystems appear to want __find_get_block to be non-blocking.
180  * But it's the page lock which protects the buffers.  To get around this,
181  * we get exclusion from try_to_free_buffers with the blockdev mapping's
182  * private_lock.
183  *
184  * Hack idea: for the blockdev mapping, private_lock contention
185  * may be quite high.  This code could TryLock the page, and if that
186  * succeeds, there is no need to take private_lock.
187  */
188 static struct buffer_head *
189 __find_get_block_slow(struct block_device *bdev, sector_t block)
190 {
191 	struct inode *bd_inode = bdev->bd_inode;
192 	struct address_space *bd_mapping = bd_inode->i_mapping;
193 	struct buffer_head *ret = NULL;
194 	pgoff_t index;
195 	struct buffer_head *bh;
196 	struct buffer_head *head;
197 	struct folio *folio;
198 	int all_mapped = 1;
199 	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
200 
201 	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
202 	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
203 	if (IS_ERR(folio))
204 		goto out;
205 
206 	spin_lock(&bd_mapping->private_lock);
207 	head = folio_buffers(folio);
208 	if (!head)
209 		goto out_unlock;
210 	bh = head;
211 	do {
212 		if (!buffer_mapped(bh))
213 			all_mapped = 0;
214 		else if (bh->b_blocknr == block) {
215 			ret = bh;
216 			get_bh(bh);
217 			goto out_unlock;
218 		}
219 		bh = bh->b_this_page;
220 	} while (bh != head);
221 
222 	/* we might be here because some of the buffers on this page are
223 	 * not mapped.  This is due to various races between
224 	 * file io on the block device and getblk.  It gets dealt with
225 	 * elsewhere, don't buffer_error if we had some unmapped buffers
226 	 */
227 	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
228 	if (all_mapped && __ratelimit(&last_warned)) {
229 		printk("__find_get_block_slow() failed. block=%llu, "
230 		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
231 		       "device %pg blocksize: %d\n",
232 		       (unsigned long long)block,
233 		       (unsigned long long)bh->b_blocknr,
234 		       bh->b_state, bh->b_size, bdev,
235 		       1 << bd_inode->i_blkbits);
236 	}
237 out_unlock:
238 	spin_unlock(&bd_mapping->private_lock);
239 	folio_put(folio);
240 out:
241 	return ret;
242 }
243 
244 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
245 {
246 	unsigned long flags;
247 	struct buffer_head *first;
248 	struct buffer_head *tmp;
249 	struct folio *folio;
250 	int folio_uptodate = 1;
251 
252 	BUG_ON(!buffer_async_read(bh));
253 
254 	folio = bh->b_folio;
255 	if (uptodate) {
256 		set_buffer_uptodate(bh);
257 	} else {
258 		clear_buffer_uptodate(bh);
259 		buffer_io_error(bh, ", async page read");
260 		folio_set_error(folio);
261 	}
262 
263 	/*
264 	 * Be _very_ careful from here on. Bad things can happen if
265 	 * two buffer heads end IO at almost the same time and both
266 	 * decide that the page is now completely done.
267 	 */
268 	first = folio_buffers(folio);
269 	spin_lock_irqsave(&first->b_uptodate_lock, flags);
270 	clear_buffer_async_read(bh);
271 	unlock_buffer(bh);
272 	tmp = bh;
273 	do {
274 		if (!buffer_uptodate(tmp))
275 			folio_uptodate = 0;
276 		if (buffer_async_read(tmp)) {
277 			BUG_ON(!buffer_locked(tmp));
278 			goto still_busy;
279 		}
280 		tmp = tmp->b_this_page;
281 	} while (tmp != bh);
282 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
283 
284 	/*
285 	 * If all of the buffers are uptodate then we can set the page
286 	 * uptodate.
287 	 */
288 	if (folio_uptodate)
289 		folio_mark_uptodate(folio);
290 	folio_unlock(folio);
291 	return;
292 
293 still_busy:
294 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
295 	return;
296 }
297 
298 struct postprocess_bh_ctx {
299 	struct work_struct work;
300 	struct buffer_head *bh;
301 };
302 
303 static void verify_bh(struct work_struct *work)
304 {
305 	struct postprocess_bh_ctx *ctx =
306 		container_of(work, struct postprocess_bh_ctx, work);
307 	struct buffer_head *bh = ctx->bh;
308 	bool valid;
309 
310 	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
311 	end_buffer_async_read(bh, valid);
312 	kfree(ctx);
313 }
314 
315 static bool need_fsverity(struct buffer_head *bh)
316 {
317 	struct folio *folio = bh->b_folio;
318 	struct inode *inode = folio->mapping->host;
319 
320 	return fsverity_active(inode) &&
321 		/* needed by ext4 */
322 		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
323 }
324 
325 static void decrypt_bh(struct work_struct *work)
326 {
327 	struct postprocess_bh_ctx *ctx =
328 		container_of(work, struct postprocess_bh_ctx, work);
329 	struct buffer_head *bh = ctx->bh;
330 	int err;
331 
332 	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
333 					       bh_offset(bh));
334 	if (err == 0 && need_fsverity(bh)) {
335 		/*
336 		 * We use different work queues for decryption and for verity
337 		 * because verity may require reading metadata pages that need
338 		 * decryption, and we shouldn't recurse to the same workqueue.
339 		 */
340 		INIT_WORK(&ctx->work, verify_bh);
341 		fsverity_enqueue_verify_work(&ctx->work);
342 		return;
343 	}
344 	end_buffer_async_read(bh, err == 0);
345 	kfree(ctx);
346 }
347 
348 /*
349  * I/O completion handler for block_read_full_folio() - pages
350  * which come unlocked at the end of I/O.
351  */
352 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
353 {
354 	struct inode *inode = bh->b_folio->mapping->host;
355 	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
356 	bool verify = need_fsverity(bh);
357 
358 	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
359 	if (uptodate && (decrypt || verify)) {
360 		struct postprocess_bh_ctx *ctx =
361 			kmalloc(sizeof(*ctx), GFP_ATOMIC);
362 
363 		if (ctx) {
364 			ctx->bh = bh;
365 			if (decrypt) {
366 				INIT_WORK(&ctx->work, decrypt_bh);
367 				fscrypt_enqueue_decrypt_work(&ctx->work);
368 			} else {
369 				INIT_WORK(&ctx->work, verify_bh);
370 				fsverity_enqueue_verify_work(&ctx->work);
371 			}
372 			return;
373 		}
374 		uptodate = 0;
375 	}
376 	end_buffer_async_read(bh, uptodate);
377 }
378 
379 /*
380  * Completion handler for block_write_full_page() - pages which are unlocked
381  * during I/O, and which have PageWriteback cleared upon I/O completion.
382  */
383 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
384 {
385 	unsigned long flags;
386 	struct buffer_head *first;
387 	struct buffer_head *tmp;
388 	struct folio *folio;
389 
390 	BUG_ON(!buffer_async_write(bh));
391 
392 	folio = bh->b_folio;
393 	if (uptodate) {
394 		set_buffer_uptodate(bh);
395 	} else {
396 		buffer_io_error(bh, ", lost async page write");
397 		mark_buffer_write_io_error(bh);
398 		clear_buffer_uptodate(bh);
399 		folio_set_error(folio);
400 	}
401 
402 	first = folio_buffers(folio);
403 	spin_lock_irqsave(&first->b_uptodate_lock, flags);
404 
405 	clear_buffer_async_write(bh);
406 	unlock_buffer(bh);
407 	tmp = bh->b_this_page;
408 	while (tmp != bh) {
409 		if (buffer_async_write(tmp)) {
410 			BUG_ON(!buffer_locked(tmp));
411 			goto still_busy;
412 		}
413 		tmp = tmp->b_this_page;
414 	}
415 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
416 	folio_end_writeback(folio);
417 	return;
418 
419 still_busy:
420 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
421 	return;
422 }
423 EXPORT_SYMBOL(end_buffer_async_write);
424 
425 /*
426  * If a page's buffers are under async readin (end_buffer_async_read
427  * completion) then there is a possibility that another thread of
428  * control could lock one of the buffers after it has completed
429  * but while some of the other buffers have not completed.  This
430  * locked buffer would confuse end_buffer_async_read() into not unlocking
431  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
432  * that this buffer is not under async I/O.
433  *
434  * The page comes unlocked when it has no locked buffer_async buffers
435  * left.
436  *
437  * PageLocked prevents anyone starting new async I/O reads any of
438  * the buffers.
439  *
440  * PageWriteback is used to prevent simultaneous writeout of the same
441  * page.
442  *
443  * PageLocked prevents anyone from starting writeback of a page which is
444  * under read I/O (PageWriteback is only ever set against a locked page).
445  */
446 static void mark_buffer_async_read(struct buffer_head *bh)
447 {
448 	bh->b_end_io = end_buffer_async_read_io;
449 	set_buffer_async_read(bh);
450 }
451 
452 static void mark_buffer_async_write_endio(struct buffer_head *bh,
453 					  bh_end_io_t *handler)
454 {
455 	bh->b_end_io = handler;
456 	set_buffer_async_write(bh);
457 }
458 
459 void mark_buffer_async_write(struct buffer_head *bh)
460 {
461 	mark_buffer_async_write_endio(bh, end_buffer_async_write);
462 }
463 EXPORT_SYMBOL(mark_buffer_async_write);
464 
465 
466 /*
467  * fs/buffer.c contains helper functions for buffer-backed address space's
468  * fsync functions.  A common requirement for buffer-based filesystems is
469  * that certain data from the backing blockdev needs to be written out for
470  * a successful fsync().  For example, ext2 indirect blocks need to be
471  * written back and waited upon before fsync() returns.
472  *
473  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
474  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
475  * management of a list of dependent buffers at ->i_mapping->private_list.
476  *
477  * Locking is a little subtle: try_to_free_buffers() will remove buffers
478  * from their controlling inode's queue when they are being freed.  But
479  * try_to_free_buffers() will be operating against the *blockdev* mapping
480  * at the time, not against the S_ISREG file which depends on those buffers.
481  * So the locking for private_list is via the private_lock in the address_space
482  * which backs the buffers.  Which is different from the address_space
483  * against which the buffers are listed.  So for a particular address_space,
484  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
485  * mapping->private_list will always be protected by the backing blockdev's
486  * ->private_lock.
487  *
488  * Which introduces a requirement: all buffers on an address_space's
489  * ->private_list must be from the same address_space: the blockdev's.
490  *
491  * address_spaces which do not place buffers at ->private_list via these
492  * utility functions are free to use private_lock and private_list for
493  * whatever they want.  The only requirement is that list_empty(private_list)
494  * be true at clear_inode() time.
495  *
496  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
497  * filesystems should do that.  invalidate_inode_buffers() should just go
498  * BUG_ON(!list_empty).
499  *
500  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
501  * take an address_space, not an inode.  And it should be called
502  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
503  * queued up.
504  *
505  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
506  * list if it is already on a list.  Because if the buffer is on a list,
507  * it *must* already be on the right one.  If not, the filesystem is being
508  * silly.  This will save a ton of locking.  But first we have to ensure
509  * that buffers are taken *off* the old inode's list when they are freed
510  * (presumably in truncate).  That requires careful auditing of all
511  * filesystems (do it inside bforget()).  It could also be done by bringing
512  * b_inode back.
513  */
514 
515 /*
516  * The buffer's backing address_space's private_lock must be held
517  */
518 static void __remove_assoc_queue(struct buffer_head *bh)
519 {
520 	list_del_init(&bh->b_assoc_buffers);
521 	WARN_ON(!bh->b_assoc_map);
522 	bh->b_assoc_map = NULL;
523 }
524 
525 int inode_has_buffers(struct inode *inode)
526 {
527 	return !list_empty(&inode->i_data.private_list);
528 }
529 
530 /*
531  * osync is designed to support O_SYNC io.  It waits synchronously for
532  * all already-submitted IO to complete, but does not queue any new
533  * writes to the disk.
534  *
535  * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
536  * as you dirty the buffers, and then use osync_inode_buffers to wait for
537  * completion.  Any other dirty buffers which are not yet queued for
538  * write will not be flushed to disk by the osync.
539  */
540 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
541 {
542 	struct buffer_head *bh;
543 	struct list_head *p;
544 	int err = 0;
545 
546 	spin_lock(lock);
547 repeat:
548 	list_for_each_prev(p, list) {
549 		bh = BH_ENTRY(p);
550 		if (buffer_locked(bh)) {
551 			get_bh(bh);
552 			spin_unlock(lock);
553 			wait_on_buffer(bh);
554 			if (!buffer_uptodate(bh))
555 				err = -EIO;
556 			brelse(bh);
557 			spin_lock(lock);
558 			goto repeat;
559 		}
560 	}
561 	spin_unlock(lock);
562 	return err;
563 }
564 
565 void emergency_thaw_bdev(struct super_block *sb)
566 {
567 	while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
568 		printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
569 }
570 
571 /**
572  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
573  * @mapping: the mapping which wants those buffers written
574  *
575  * Starts I/O against the buffers at mapping->private_list, and waits upon
576  * that I/O.
577  *
578  * Basically, this is a convenience function for fsync().
579  * @mapping is a file or directory which needs those buffers to be written for
580  * a successful fsync().
581  */
582 int sync_mapping_buffers(struct address_space *mapping)
583 {
584 	struct address_space *buffer_mapping = mapping->private_data;
585 
586 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
587 		return 0;
588 
589 	return fsync_buffers_list(&buffer_mapping->private_lock,
590 					&mapping->private_list);
591 }
592 EXPORT_SYMBOL(sync_mapping_buffers);
593 
594 /*
595  * Called when we've recently written block `bblock', and it is known that
596  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
597  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
598  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
599  */
600 void write_boundary_block(struct block_device *bdev,
601 			sector_t bblock, unsigned blocksize)
602 {
603 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
604 	if (bh) {
605 		if (buffer_dirty(bh))
606 			write_dirty_buffer(bh, 0);
607 		put_bh(bh);
608 	}
609 }
610 
611 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
612 {
613 	struct address_space *mapping = inode->i_mapping;
614 	struct address_space *buffer_mapping = bh->b_folio->mapping;
615 
616 	mark_buffer_dirty(bh);
617 	if (!mapping->private_data) {
618 		mapping->private_data = buffer_mapping;
619 	} else {
620 		BUG_ON(mapping->private_data != buffer_mapping);
621 	}
622 	if (!bh->b_assoc_map) {
623 		spin_lock(&buffer_mapping->private_lock);
624 		list_move_tail(&bh->b_assoc_buffers,
625 				&mapping->private_list);
626 		bh->b_assoc_map = mapping;
627 		spin_unlock(&buffer_mapping->private_lock);
628 	}
629 }
630 EXPORT_SYMBOL(mark_buffer_dirty_inode);
631 
632 /*
633  * Add a page to the dirty page list.
634  *
635  * It is a sad fact of life that this function is called from several places
636  * deeply under spinlocking.  It may not sleep.
637  *
638  * If the page has buffers, the uptodate buffers are set dirty, to preserve
639  * dirty-state coherency between the page and the buffers.  It the page does
640  * not have buffers then when they are later attached they will all be set
641  * dirty.
642  *
643  * The buffers are dirtied before the page is dirtied.  There's a small race
644  * window in which a writepage caller may see the page cleanness but not the
645  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
646  * before the buffers, a concurrent writepage caller could clear the page dirty
647  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
648  * page on the dirty page list.
649  *
650  * We use private_lock to lock against try_to_free_buffers while using the
651  * page's buffer list.  Also use this to protect against clean buffers being
652  * added to the page after it was set dirty.
653  *
654  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
655  * address_space though.
656  */
657 bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
658 {
659 	struct buffer_head *head;
660 	bool newly_dirty;
661 
662 	spin_lock(&mapping->private_lock);
663 	head = folio_buffers(folio);
664 	if (head) {
665 		struct buffer_head *bh = head;
666 
667 		do {
668 			set_buffer_dirty(bh);
669 			bh = bh->b_this_page;
670 		} while (bh != head);
671 	}
672 	/*
673 	 * Lock out page's memcg migration to keep PageDirty
674 	 * synchronized with per-memcg dirty page counters.
675 	 */
676 	folio_memcg_lock(folio);
677 	newly_dirty = !folio_test_set_dirty(folio);
678 	spin_unlock(&mapping->private_lock);
679 
680 	if (newly_dirty)
681 		__folio_mark_dirty(folio, mapping, 1);
682 
683 	folio_memcg_unlock(folio);
684 
685 	if (newly_dirty)
686 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
687 
688 	return newly_dirty;
689 }
690 EXPORT_SYMBOL(block_dirty_folio);
691 
692 /*
693  * Write out and wait upon a list of buffers.
694  *
695  * We have conflicting pressures: we want to make sure that all
696  * initially dirty buffers get waited on, but that any subsequently
697  * dirtied buffers don't.  After all, we don't want fsync to last
698  * forever if somebody is actively writing to the file.
699  *
700  * Do this in two main stages: first we copy dirty buffers to a
701  * temporary inode list, queueing the writes as we go.  Then we clean
702  * up, waiting for those writes to complete.
703  *
704  * During this second stage, any subsequent updates to the file may end
705  * up refiling the buffer on the original inode's dirty list again, so
706  * there is a chance we will end up with a buffer queued for write but
707  * not yet completed on that list.  So, as a final cleanup we go through
708  * the osync code to catch these locked, dirty buffers without requeuing
709  * any newly dirty buffers for write.
710  */
711 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
712 {
713 	struct buffer_head *bh;
714 	struct list_head tmp;
715 	struct address_space *mapping;
716 	int err = 0, err2;
717 	struct blk_plug plug;
718 
719 	INIT_LIST_HEAD(&tmp);
720 	blk_start_plug(&plug);
721 
722 	spin_lock(lock);
723 	while (!list_empty(list)) {
724 		bh = BH_ENTRY(list->next);
725 		mapping = bh->b_assoc_map;
726 		__remove_assoc_queue(bh);
727 		/* Avoid race with mark_buffer_dirty_inode() which does
728 		 * a lockless check and we rely on seeing the dirty bit */
729 		smp_mb();
730 		if (buffer_dirty(bh) || buffer_locked(bh)) {
731 			list_add(&bh->b_assoc_buffers, &tmp);
732 			bh->b_assoc_map = mapping;
733 			if (buffer_dirty(bh)) {
734 				get_bh(bh);
735 				spin_unlock(lock);
736 				/*
737 				 * Ensure any pending I/O completes so that
738 				 * write_dirty_buffer() actually writes the
739 				 * current contents - it is a noop if I/O is
740 				 * still in flight on potentially older
741 				 * contents.
742 				 */
743 				write_dirty_buffer(bh, REQ_SYNC);
744 
745 				/*
746 				 * Kick off IO for the previous mapping. Note
747 				 * that we will not run the very last mapping,
748 				 * wait_on_buffer() will do that for us
749 				 * through sync_buffer().
750 				 */
751 				brelse(bh);
752 				spin_lock(lock);
753 			}
754 		}
755 	}
756 
757 	spin_unlock(lock);
758 	blk_finish_plug(&plug);
759 	spin_lock(lock);
760 
761 	while (!list_empty(&tmp)) {
762 		bh = BH_ENTRY(tmp.prev);
763 		get_bh(bh);
764 		mapping = bh->b_assoc_map;
765 		__remove_assoc_queue(bh);
766 		/* Avoid race with mark_buffer_dirty_inode() which does
767 		 * a lockless check and we rely on seeing the dirty bit */
768 		smp_mb();
769 		if (buffer_dirty(bh)) {
770 			list_add(&bh->b_assoc_buffers,
771 				 &mapping->private_list);
772 			bh->b_assoc_map = mapping;
773 		}
774 		spin_unlock(lock);
775 		wait_on_buffer(bh);
776 		if (!buffer_uptodate(bh))
777 			err = -EIO;
778 		brelse(bh);
779 		spin_lock(lock);
780 	}
781 
782 	spin_unlock(lock);
783 	err2 = osync_buffers_list(lock, list);
784 	if (err)
785 		return err;
786 	else
787 		return err2;
788 }
789 
790 /*
791  * Invalidate any and all dirty buffers on a given inode.  We are
792  * probably unmounting the fs, but that doesn't mean we have already
793  * done a sync().  Just drop the buffers from the inode list.
794  *
795  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
796  * assumes that all the buffers are against the blockdev.  Not true
797  * for reiserfs.
798  */
799 void invalidate_inode_buffers(struct inode *inode)
800 {
801 	if (inode_has_buffers(inode)) {
802 		struct address_space *mapping = &inode->i_data;
803 		struct list_head *list = &mapping->private_list;
804 		struct address_space *buffer_mapping = mapping->private_data;
805 
806 		spin_lock(&buffer_mapping->private_lock);
807 		while (!list_empty(list))
808 			__remove_assoc_queue(BH_ENTRY(list->next));
809 		spin_unlock(&buffer_mapping->private_lock);
810 	}
811 }
812 EXPORT_SYMBOL(invalidate_inode_buffers);
813 
814 /*
815  * Remove any clean buffers from the inode's buffer list.  This is called
816  * when we're trying to free the inode itself.  Those buffers can pin it.
817  *
818  * Returns true if all buffers were removed.
819  */
820 int remove_inode_buffers(struct inode *inode)
821 {
822 	int ret = 1;
823 
824 	if (inode_has_buffers(inode)) {
825 		struct address_space *mapping = &inode->i_data;
826 		struct list_head *list = &mapping->private_list;
827 		struct address_space *buffer_mapping = mapping->private_data;
828 
829 		spin_lock(&buffer_mapping->private_lock);
830 		while (!list_empty(list)) {
831 			struct buffer_head *bh = BH_ENTRY(list->next);
832 			if (buffer_dirty(bh)) {
833 				ret = 0;
834 				break;
835 			}
836 			__remove_assoc_queue(bh);
837 		}
838 		spin_unlock(&buffer_mapping->private_lock);
839 	}
840 	return ret;
841 }
842 
843 /*
844  * Create the appropriate buffers when given a folio for data area and
845  * the size of each buffer.. Use the bh->b_this_page linked list to
846  * follow the buffers created.  Return NULL if unable to create more
847  * buffers.
848  *
849  * The retry flag is used to differentiate async IO (paging, swapping)
850  * which may not fail from ordinary buffer allocations.
851  */
852 struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
853 					bool retry)
854 {
855 	struct buffer_head *bh, *head;
856 	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
857 	long offset;
858 	struct mem_cgroup *memcg, *old_memcg;
859 
860 	if (retry)
861 		gfp |= __GFP_NOFAIL;
862 
863 	/* The folio lock pins the memcg */
864 	memcg = folio_memcg(folio);
865 	old_memcg = set_active_memcg(memcg);
866 
867 	head = NULL;
868 	offset = folio_size(folio);
869 	while ((offset -= size) >= 0) {
870 		bh = alloc_buffer_head(gfp);
871 		if (!bh)
872 			goto no_grow;
873 
874 		bh->b_this_page = head;
875 		bh->b_blocknr = -1;
876 		head = bh;
877 
878 		bh->b_size = size;
879 
880 		/* Link the buffer to its folio */
881 		folio_set_bh(bh, folio, offset);
882 	}
883 out:
884 	set_active_memcg(old_memcg);
885 	return head;
886 /*
887  * In case anything failed, we just free everything we got.
888  */
889 no_grow:
890 	if (head) {
891 		do {
892 			bh = head;
893 			head = head->b_this_page;
894 			free_buffer_head(bh);
895 		} while (head);
896 	}
897 
898 	goto out;
899 }
900 EXPORT_SYMBOL_GPL(folio_alloc_buffers);
901 
902 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
903 				       bool retry)
904 {
905 	return folio_alloc_buffers(page_folio(page), size, retry);
906 }
907 EXPORT_SYMBOL_GPL(alloc_page_buffers);
908 
909 static inline void link_dev_buffers(struct folio *folio,
910 		struct buffer_head *head)
911 {
912 	struct buffer_head *bh, *tail;
913 
914 	bh = head;
915 	do {
916 		tail = bh;
917 		bh = bh->b_this_page;
918 	} while (bh);
919 	tail->b_this_page = head;
920 	folio_attach_private(folio, head);
921 }
922 
923 static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
924 {
925 	sector_t retval = ~((sector_t)0);
926 	loff_t sz = bdev_nr_bytes(bdev);
927 
928 	if (sz) {
929 		unsigned int sizebits = blksize_bits(size);
930 		retval = (sz >> sizebits);
931 	}
932 	return retval;
933 }
934 
935 /*
936  * Initialise the state of a blockdev folio's buffers.
937  */
938 static sector_t folio_init_buffers(struct folio *folio,
939 		struct block_device *bdev, sector_t block, int size)
940 {
941 	struct buffer_head *head = folio_buffers(folio);
942 	struct buffer_head *bh = head;
943 	bool uptodate = folio_test_uptodate(folio);
944 	sector_t end_block = blkdev_max_block(bdev, size);
945 
946 	do {
947 		if (!buffer_mapped(bh)) {
948 			bh->b_end_io = NULL;
949 			bh->b_private = NULL;
950 			bh->b_bdev = bdev;
951 			bh->b_blocknr = block;
952 			if (uptodate)
953 				set_buffer_uptodate(bh);
954 			if (block < end_block)
955 				set_buffer_mapped(bh);
956 		}
957 		block++;
958 		bh = bh->b_this_page;
959 	} while (bh != head);
960 
961 	/*
962 	 * Caller needs to validate requested block against end of device.
963 	 */
964 	return end_block;
965 }
966 
967 /*
968  * Create the page-cache page that contains the requested block.
969  *
970  * This is used purely for blockdev mappings.
971  */
972 static int
973 grow_dev_page(struct block_device *bdev, sector_t block,
974 	      pgoff_t index, int size, int sizebits, gfp_t gfp)
975 {
976 	struct inode *inode = bdev->bd_inode;
977 	struct folio *folio;
978 	struct buffer_head *bh;
979 	sector_t end_block;
980 	int ret = 0;
981 	gfp_t gfp_mask;
982 
983 	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
984 
985 	/*
986 	 * XXX: __getblk_slow() can not really deal with failure and
987 	 * will endlessly loop on improvised global reclaim.  Prefer
988 	 * looping in the allocator rather than here, at least that
989 	 * code knows what it's doing.
990 	 */
991 	gfp_mask |= __GFP_NOFAIL;
992 
993 	folio = __filemap_get_folio(inode->i_mapping, index,
994 			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp_mask);
995 
996 	bh = folio_buffers(folio);
997 	if (bh) {
998 		if (bh->b_size == size) {
999 			end_block = folio_init_buffers(folio, bdev,
1000 					(sector_t)index << sizebits, size);
1001 			goto done;
1002 		}
1003 		if (!try_to_free_buffers(folio))
1004 			goto failed;
1005 	}
1006 
1007 	bh = folio_alloc_buffers(folio, size, true);
1008 
1009 	/*
1010 	 * Link the folio to the buffers and initialise them.  Take the
1011 	 * lock to be atomic wrt __find_get_block(), which does not
1012 	 * run under the folio lock.
1013 	 */
1014 	spin_lock(&inode->i_mapping->private_lock);
1015 	link_dev_buffers(folio, bh);
1016 	end_block = folio_init_buffers(folio, bdev,
1017 			(sector_t)index << sizebits, size);
1018 	spin_unlock(&inode->i_mapping->private_lock);
1019 done:
1020 	ret = (block < end_block) ? 1 : -ENXIO;
1021 failed:
1022 	folio_unlock(folio);
1023 	folio_put(folio);
1024 	return ret;
1025 }
1026 
1027 /*
1028  * Create buffers for the specified block device block's page.  If
1029  * that page was dirty, the buffers are set dirty also.
1030  */
1031 static int
1032 grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
1033 {
1034 	pgoff_t index;
1035 	int sizebits;
1036 
1037 	sizebits = PAGE_SHIFT - __ffs(size);
1038 	index = block >> sizebits;
1039 
1040 	/*
1041 	 * Check for a block which wants to lie outside our maximum possible
1042 	 * pagecache index.  (this comparison is done using sector_t types).
1043 	 */
1044 	if (unlikely(index != block >> sizebits)) {
1045 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1046 			"device %pg\n",
1047 			__func__, (unsigned long long)block,
1048 			bdev);
1049 		return -EIO;
1050 	}
1051 
1052 	/* Create a page with the proper size buffers.. */
1053 	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
1054 }
1055 
1056 static struct buffer_head *
1057 __getblk_slow(struct block_device *bdev, sector_t block,
1058 	     unsigned size, gfp_t gfp)
1059 {
1060 	/* Size must be multiple of hard sectorsize */
1061 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1062 			(size < 512 || size > PAGE_SIZE))) {
1063 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1064 					size);
1065 		printk(KERN_ERR "logical block size: %d\n",
1066 					bdev_logical_block_size(bdev));
1067 
1068 		dump_stack();
1069 		return NULL;
1070 	}
1071 
1072 	for (;;) {
1073 		struct buffer_head *bh;
1074 		int ret;
1075 
1076 		bh = __find_get_block(bdev, block, size);
1077 		if (bh)
1078 			return bh;
1079 
1080 		ret = grow_buffers(bdev, block, size, gfp);
1081 		if (ret < 0)
1082 			return NULL;
1083 	}
1084 }
1085 
1086 /*
1087  * The relationship between dirty buffers and dirty pages:
1088  *
1089  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1090  * the page is tagged dirty in the page cache.
1091  *
1092  * At all times, the dirtiness of the buffers represents the dirtiness of
1093  * subsections of the page.  If the page has buffers, the page dirty bit is
1094  * merely a hint about the true dirty state.
1095  *
1096  * When a page is set dirty in its entirety, all its buffers are marked dirty
1097  * (if the page has buffers).
1098  *
1099  * When a buffer is marked dirty, its page is dirtied, but the page's other
1100  * buffers are not.
1101  *
1102  * Also.  When blockdev buffers are explicitly read with bread(), they
1103  * individually become uptodate.  But their backing page remains not
1104  * uptodate - even if all of its buffers are uptodate.  A subsequent
1105  * block_read_full_folio() against that folio will discover all the uptodate
1106  * buffers, will set the folio uptodate and will perform no I/O.
1107  */
1108 
1109 /**
1110  * mark_buffer_dirty - mark a buffer_head as needing writeout
1111  * @bh: the buffer_head to mark dirty
1112  *
1113  * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1114  * its backing page dirty, then tag the page as dirty in the page cache
1115  * and then attach the address_space's inode to its superblock's dirty
1116  * inode list.
1117  *
1118  * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->private_lock,
1119  * i_pages lock and mapping->host->i_lock.
1120  */
1121 void mark_buffer_dirty(struct buffer_head *bh)
1122 {
1123 	WARN_ON_ONCE(!buffer_uptodate(bh));
1124 
1125 	trace_block_dirty_buffer(bh);
1126 
1127 	/*
1128 	 * Very *carefully* optimize the it-is-already-dirty case.
1129 	 *
1130 	 * Don't let the final "is it dirty" escape to before we
1131 	 * perhaps modified the buffer.
1132 	 */
1133 	if (buffer_dirty(bh)) {
1134 		smp_mb();
1135 		if (buffer_dirty(bh))
1136 			return;
1137 	}
1138 
1139 	if (!test_set_buffer_dirty(bh)) {
1140 		struct folio *folio = bh->b_folio;
1141 		struct address_space *mapping = NULL;
1142 
1143 		folio_memcg_lock(folio);
1144 		if (!folio_test_set_dirty(folio)) {
1145 			mapping = folio->mapping;
1146 			if (mapping)
1147 				__folio_mark_dirty(folio, mapping, 0);
1148 		}
1149 		folio_memcg_unlock(folio);
1150 		if (mapping)
1151 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1152 	}
1153 }
1154 EXPORT_SYMBOL(mark_buffer_dirty);
1155 
1156 void mark_buffer_write_io_error(struct buffer_head *bh)
1157 {
1158 	struct super_block *sb;
1159 
1160 	set_buffer_write_io_error(bh);
1161 	/* FIXME: do we need to set this in both places? */
1162 	if (bh->b_folio && bh->b_folio->mapping)
1163 		mapping_set_error(bh->b_folio->mapping, -EIO);
1164 	if (bh->b_assoc_map)
1165 		mapping_set_error(bh->b_assoc_map, -EIO);
1166 	rcu_read_lock();
1167 	sb = READ_ONCE(bh->b_bdev->bd_super);
1168 	if (sb)
1169 		errseq_set(&sb->s_wb_err, -EIO);
1170 	rcu_read_unlock();
1171 }
1172 EXPORT_SYMBOL(mark_buffer_write_io_error);
1173 
1174 /*
1175  * Decrement a buffer_head's reference count.  If all buffers against a page
1176  * have zero reference count, are clean and unlocked, and if the page is clean
1177  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1178  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1179  * a page but it ends up not being freed, and buffers may later be reattached).
1180  */
1181 void __brelse(struct buffer_head * buf)
1182 {
1183 	if (atomic_read(&buf->b_count)) {
1184 		put_bh(buf);
1185 		return;
1186 	}
1187 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1188 }
1189 EXPORT_SYMBOL(__brelse);
1190 
1191 /*
1192  * bforget() is like brelse(), except it discards any
1193  * potentially dirty data.
1194  */
1195 void __bforget(struct buffer_head *bh)
1196 {
1197 	clear_buffer_dirty(bh);
1198 	if (bh->b_assoc_map) {
1199 		struct address_space *buffer_mapping = bh->b_folio->mapping;
1200 
1201 		spin_lock(&buffer_mapping->private_lock);
1202 		list_del_init(&bh->b_assoc_buffers);
1203 		bh->b_assoc_map = NULL;
1204 		spin_unlock(&buffer_mapping->private_lock);
1205 	}
1206 	__brelse(bh);
1207 }
1208 EXPORT_SYMBOL(__bforget);
1209 
1210 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1211 {
1212 	lock_buffer(bh);
1213 	if (buffer_uptodate(bh)) {
1214 		unlock_buffer(bh);
1215 		return bh;
1216 	} else {
1217 		get_bh(bh);
1218 		bh->b_end_io = end_buffer_read_sync;
1219 		submit_bh(REQ_OP_READ, bh);
1220 		wait_on_buffer(bh);
1221 		if (buffer_uptodate(bh))
1222 			return bh;
1223 	}
1224 	brelse(bh);
1225 	return NULL;
1226 }
1227 
1228 /*
1229  * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
1230  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1231  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1232  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1233  * CPU's LRUs at the same time.
1234  *
1235  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1236  * sb_find_get_block().
1237  *
1238  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1239  * a local interrupt disable for that.
1240  */
1241 
1242 #define BH_LRU_SIZE	16
1243 
1244 struct bh_lru {
1245 	struct buffer_head *bhs[BH_LRU_SIZE];
1246 };
1247 
1248 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1249 
1250 #ifdef CONFIG_SMP
1251 #define bh_lru_lock()	local_irq_disable()
1252 #define bh_lru_unlock()	local_irq_enable()
1253 #else
1254 #define bh_lru_lock()	preempt_disable()
1255 #define bh_lru_unlock()	preempt_enable()
1256 #endif
1257 
1258 static inline void check_irqs_on(void)
1259 {
1260 #ifdef irqs_disabled
1261 	BUG_ON(irqs_disabled());
1262 #endif
1263 }
1264 
1265 /*
1266  * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
1267  * inserted at the front, and the buffer_head at the back if any is evicted.
1268  * Or, if already in the LRU it is moved to the front.
1269  */
1270 static void bh_lru_install(struct buffer_head *bh)
1271 {
1272 	struct buffer_head *evictee = bh;
1273 	struct bh_lru *b;
1274 	int i;
1275 
1276 	check_irqs_on();
1277 	bh_lru_lock();
1278 
1279 	/*
1280 	 * the refcount of buffer_head in bh_lru prevents dropping the
1281 	 * attached page(i.e., try_to_free_buffers) so it could cause
1282 	 * failing page migration.
1283 	 * Skip putting upcoming bh into bh_lru until migration is done.
1284 	 */
1285 	if (lru_cache_disabled()) {
1286 		bh_lru_unlock();
1287 		return;
1288 	}
1289 
1290 	b = this_cpu_ptr(&bh_lrus);
1291 	for (i = 0; i < BH_LRU_SIZE; i++) {
1292 		swap(evictee, b->bhs[i]);
1293 		if (evictee == bh) {
1294 			bh_lru_unlock();
1295 			return;
1296 		}
1297 	}
1298 
1299 	get_bh(bh);
1300 	bh_lru_unlock();
1301 	brelse(evictee);
1302 }
1303 
1304 /*
1305  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1306  */
1307 static struct buffer_head *
1308 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1309 {
1310 	struct buffer_head *ret = NULL;
1311 	unsigned int i;
1312 
1313 	check_irqs_on();
1314 	bh_lru_lock();
1315 	for (i = 0; i < BH_LRU_SIZE; i++) {
1316 		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1317 
1318 		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
1319 		    bh->b_size == size) {
1320 			if (i) {
1321 				while (i) {
1322 					__this_cpu_write(bh_lrus.bhs[i],
1323 						__this_cpu_read(bh_lrus.bhs[i - 1]));
1324 					i--;
1325 				}
1326 				__this_cpu_write(bh_lrus.bhs[0], bh);
1327 			}
1328 			get_bh(bh);
1329 			ret = bh;
1330 			break;
1331 		}
1332 	}
1333 	bh_lru_unlock();
1334 	return ret;
1335 }
1336 
1337 /*
1338  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1339  * it in the LRU and mark it as accessed.  If it is not present then return
1340  * NULL
1341  */
1342 struct buffer_head *
1343 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1344 {
1345 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1346 
1347 	if (bh == NULL) {
1348 		/* __find_get_block_slow will mark the page accessed */
1349 		bh = __find_get_block_slow(bdev, block);
1350 		if (bh)
1351 			bh_lru_install(bh);
1352 	} else
1353 		touch_buffer(bh);
1354 
1355 	return bh;
1356 }
1357 EXPORT_SYMBOL(__find_get_block);
1358 
1359 /*
1360  * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
1361  * which corresponds to the passed block_device, block and size. The
1362  * returned buffer has its reference count incremented.
1363  *
1364  * __getblk_gfp() will lock up the machine if grow_dev_page's
1365  * try_to_free_buffers() attempt is failing.  FIXME, perhaps?
1366  */
1367 struct buffer_head *
1368 __getblk_gfp(struct block_device *bdev, sector_t block,
1369 	     unsigned size, gfp_t gfp)
1370 {
1371 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1372 
1373 	might_sleep();
1374 	if (bh == NULL)
1375 		bh = __getblk_slow(bdev, block, size, gfp);
1376 	return bh;
1377 }
1378 EXPORT_SYMBOL(__getblk_gfp);
1379 
1380 /*
1381  * Do async read-ahead on a buffer..
1382  */
1383 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1384 {
1385 	struct buffer_head *bh = __getblk(bdev, block, size);
1386 	if (likely(bh)) {
1387 		bh_readahead(bh, REQ_RAHEAD);
1388 		brelse(bh);
1389 	}
1390 }
1391 EXPORT_SYMBOL(__breadahead);
1392 
1393 /**
1394  *  __bread_gfp() - reads a specified block and returns the bh
1395  *  @bdev: the block_device to read from
1396  *  @block: number of block
1397  *  @size: size (in bytes) to read
1398  *  @gfp: page allocation flag
1399  *
1400  *  Reads a specified block, and returns buffer head that contains it.
1401  *  The page cache can be allocated from non-movable area
1402  *  not to prevent page migration if you set gfp to zero.
1403  *  It returns NULL if the block was unreadable.
1404  */
1405 struct buffer_head *
1406 __bread_gfp(struct block_device *bdev, sector_t block,
1407 		   unsigned size, gfp_t gfp)
1408 {
1409 	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
1410 
1411 	if (likely(bh) && !buffer_uptodate(bh))
1412 		bh = __bread_slow(bh);
1413 	return bh;
1414 }
1415 EXPORT_SYMBOL(__bread_gfp);
1416 
1417 static void __invalidate_bh_lrus(struct bh_lru *b)
1418 {
1419 	int i;
1420 
1421 	for (i = 0; i < BH_LRU_SIZE; i++) {
1422 		brelse(b->bhs[i]);
1423 		b->bhs[i] = NULL;
1424 	}
1425 }
1426 /*
1427  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1428  * This doesn't race because it runs in each cpu either in irq
1429  * or with preempt disabled.
1430  */
1431 static void invalidate_bh_lru(void *arg)
1432 {
1433 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1434 
1435 	__invalidate_bh_lrus(b);
1436 	put_cpu_var(bh_lrus);
1437 }
1438 
1439 bool has_bh_in_lru(int cpu, void *dummy)
1440 {
1441 	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1442 	int i;
1443 
1444 	for (i = 0; i < BH_LRU_SIZE; i++) {
1445 		if (b->bhs[i])
1446 			return true;
1447 	}
1448 
1449 	return false;
1450 }
1451 
1452 void invalidate_bh_lrus(void)
1453 {
1454 	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
1455 }
1456 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1457 
1458 /*
1459  * It's called from workqueue context so we need a bh_lru_lock to close
1460  * the race with preemption/irq.
1461  */
1462 void invalidate_bh_lrus_cpu(void)
1463 {
1464 	struct bh_lru *b;
1465 
1466 	bh_lru_lock();
1467 	b = this_cpu_ptr(&bh_lrus);
1468 	__invalidate_bh_lrus(b);
1469 	bh_lru_unlock();
1470 }
1471 
1472 void set_bh_page(struct buffer_head *bh,
1473 		struct page *page, unsigned long offset)
1474 {
1475 	bh->b_page = page;
1476 	BUG_ON(offset >= PAGE_SIZE);
1477 	if (PageHighMem(page))
1478 		/*
1479 		 * This catches illegal uses and preserves the offset:
1480 		 */
1481 		bh->b_data = (char *)(0 + offset);
1482 	else
1483 		bh->b_data = page_address(page) + offset;
1484 }
1485 EXPORT_SYMBOL(set_bh_page);
1486 
1487 void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1488 		  unsigned long offset)
1489 {
1490 	bh->b_folio = folio;
1491 	BUG_ON(offset >= folio_size(folio));
1492 	if (folio_test_highmem(folio))
1493 		/*
1494 		 * This catches illegal uses and preserves the offset:
1495 		 */
1496 		bh->b_data = (char *)(0 + offset);
1497 	else
1498 		bh->b_data = folio_address(folio) + offset;
1499 }
1500 EXPORT_SYMBOL(folio_set_bh);
1501 
1502 /*
1503  * Called when truncating a buffer on a page completely.
1504  */
1505 
1506 /* Bits that are cleared during an invalidate */
1507 #define BUFFER_FLAGS_DISCARD \
1508 	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1509 	 1 << BH_Delay | 1 << BH_Unwritten)
1510 
1511 static void discard_buffer(struct buffer_head * bh)
1512 {
1513 	unsigned long b_state;
1514 
1515 	lock_buffer(bh);
1516 	clear_buffer_dirty(bh);
1517 	bh->b_bdev = NULL;
1518 	b_state = READ_ONCE(bh->b_state);
1519 	do {
1520 	} while (!try_cmpxchg(&bh->b_state, &b_state,
1521 			      b_state & ~BUFFER_FLAGS_DISCARD));
1522 	unlock_buffer(bh);
1523 }
1524 
1525 /**
1526  * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
1527  * @folio: The folio which is affected.
1528  * @offset: start of the range to invalidate
1529  * @length: length of the range to invalidate
1530  *
1531  * block_invalidate_folio() is called when all or part of the folio has been
1532  * invalidated by a truncate operation.
1533  *
1534  * block_invalidate_folio() does not have to release all buffers, but it must
1535  * ensure that no dirty buffer is left outside @offset and that no I/O
1536  * is underway against any of the blocks which are outside the truncation
1537  * point.  Because the caller is about to free (and possibly reuse) those
1538  * blocks on-disk.
1539  */
1540 void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1541 {
1542 	struct buffer_head *head, *bh, *next;
1543 	size_t curr_off = 0;
1544 	size_t stop = length + offset;
1545 
1546 	BUG_ON(!folio_test_locked(folio));
1547 
1548 	/*
1549 	 * Check for overflow
1550 	 */
1551 	BUG_ON(stop > folio_size(folio) || stop < length);
1552 
1553 	head = folio_buffers(folio);
1554 	if (!head)
1555 		return;
1556 
1557 	bh = head;
1558 	do {
1559 		size_t next_off = curr_off + bh->b_size;
1560 		next = bh->b_this_page;
1561 
1562 		/*
1563 		 * Are we still fully in range ?
1564 		 */
1565 		if (next_off > stop)
1566 			goto out;
1567 
1568 		/*
1569 		 * is this block fully invalidated?
1570 		 */
1571 		if (offset <= curr_off)
1572 			discard_buffer(bh);
1573 		curr_off = next_off;
1574 		bh = next;
1575 	} while (bh != head);
1576 
1577 	/*
1578 	 * We release buffers only if the entire folio is being invalidated.
1579 	 * The get_block cached value has been unconditionally invalidated,
1580 	 * so real IO is not possible anymore.
1581 	 */
1582 	if (length == folio_size(folio))
1583 		filemap_release_folio(folio, 0);
1584 out:
1585 	return;
1586 }
1587 EXPORT_SYMBOL(block_invalidate_folio);
1588 
1589 /*
1590  * We attach and possibly dirty the buffers atomically wrt
1591  * block_dirty_folio() via private_lock.  try_to_free_buffers
1592  * is already excluded via the folio lock.
1593  */
1594 void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
1595 				unsigned long b_state)
1596 {
1597 	struct buffer_head *bh, *head, *tail;
1598 
1599 	head = folio_alloc_buffers(folio, blocksize, true);
1600 	bh = head;
1601 	do {
1602 		bh->b_state |= b_state;
1603 		tail = bh;
1604 		bh = bh->b_this_page;
1605 	} while (bh);
1606 	tail->b_this_page = head;
1607 
1608 	spin_lock(&folio->mapping->private_lock);
1609 	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
1610 		bh = head;
1611 		do {
1612 			if (folio_test_dirty(folio))
1613 				set_buffer_dirty(bh);
1614 			if (folio_test_uptodate(folio))
1615 				set_buffer_uptodate(bh);
1616 			bh = bh->b_this_page;
1617 		} while (bh != head);
1618 	}
1619 	folio_attach_private(folio, head);
1620 	spin_unlock(&folio->mapping->private_lock);
1621 }
1622 EXPORT_SYMBOL(folio_create_empty_buffers);
1623 
1624 void create_empty_buffers(struct page *page,
1625 			unsigned long blocksize, unsigned long b_state)
1626 {
1627 	folio_create_empty_buffers(page_folio(page), blocksize, b_state);
1628 }
1629 EXPORT_SYMBOL(create_empty_buffers);
1630 
1631 /**
1632  * clean_bdev_aliases: clean a range of buffers in block device
1633  * @bdev: Block device to clean buffers in
1634  * @block: Start of a range of blocks to clean
1635  * @len: Number of blocks to clean
1636  *
1637  * We are taking a range of blocks for data and we don't want writeback of any
1638  * buffer-cache aliases starting from return from this function and until the
1639  * moment when something will explicitly mark the buffer dirty (hopefully that
1640  * will not happen until we will free that block ;-) We don't even need to mark
1641  * it not-uptodate - nobody can expect anything from a newly allocated buffer
1642  * anyway. We used to use unmap_buffer() for such invalidation, but that was
1643  * wrong. We definitely don't want to mark the alias unmapped, for example - it
1644  * would confuse anyone who might pick it with bread() afterwards...
1645  *
1646  * Also..  Note that bforget() doesn't lock the buffer.  So there can be
1647  * writeout I/O going on against recently-freed buffers.  We don't wait on that
1648  * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1649  * need to.  That happens here.
1650  */
1651 void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1652 {
1653 	struct inode *bd_inode = bdev->bd_inode;
1654 	struct address_space *bd_mapping = bd_inode->i_mapping;
1655 	struct folio_batch fbatch;
1656 	pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
1657 	pgoff_t end;
1658 	int i, count;
1659 	struct buffer_head *bh;
1660 	struct buffer_head *head;
1661 
1662 	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
1663 	folio_batch_init(&fbatch);
1664 	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
1665 		count = folio_batch_count(&fbatch);
1666 		for (i = 0; i < count; i++) {
1667 			struct folio *folio = fbatch.folios[i];
1668 
1669 			if (!folio_buffers(folio))
1670 				continue;
1671 			/*
1672 			 * We use folio lock instead of bd_mapping->private_lock
1673 			 * to pin buffers here since we can afford to sleep and
1674 			 * it scales better than a global spinlock lock.
1675 			 */
1676 			folio_lock(folio);
1677 			/* Recheck when the folio is locked which pins bhs */
1678 			head = folio_buffers(folio);
1679 			if (!head)
1680 				goto unlock_page;
1681 			bh = head;
1682 			do {
1683 				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1684 					goto next;
1685 				if (bh->b_blocknr >= block + len)
1686 					break;
1687 				clear_buffer_dirty(bh);
1688 				wait_on_buffer(bh);
1689 				clear_buffer_req(bh);
1690 next:
1691 				bh = bh->b_this_page;
1692 			} while (bh != head);
1693 unlock_page:
1694 			folio_unlock(folio);
1695 		}
1696 		folio_batch_release(&fbatch);
1697 		cond_resched();
1698 		/* End of range already reached? */
1699 		if (index > end || !index)
1700 			break;
1701 	}
1702 }
1703 EXPORT_SYMBOL(clean_bdev_aliases);
1704 
1705 /*
1706  * Size is a power-of-two in the range 512..PAGE_SIZE,
1707  * and the case we care about most is PAGE_SIZE.
1708  *
1709  * So this *could* possibly be written with those
1710  * constraints in mind (relevant mostly if some
1711  * architecture has a slow bit-scan instruction)
1712  */
1713 static inline int block_size_bits(unsigned int blocksize)
1714 {
1715 	return ilog2(blocksize);
1716 }
1717 
1718 static struct buffer_head *folio_create_buffers(struct folio *folio,
1719 						struct inode *inode,
1720 						unsigned int b_state)
1721 {
1722 	BUG_ON(!folio_test_locked(folio));
1723 
1724 	if (!folio_buffers(folio))
1725 		folio_create_empty_buffers(folio,
1726 					   1 << READ_ONCE(inode->i_blkbits),
1727 					   b_state);
1728 	return folio_buffers(folio);
1729 }
1730 
1731 /*
1732  * NOTE! All mapped/uptodate combinations are valid:
1733  *
1734  *	Mapped	Uptodate	Meaning
1735  *
1736  *	No	No		"unknown" - must do get_block()
1737  *	No	Yes		"hole" - zero-filled
1738  *	Yes	No		"allocated" - allocated on disk, not read in
1739  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1740  *
1741  * "Dirty" is valid only with the last case (mapped+uptodate).
1742  */
1743 
1744 /*
1745  * While block_write_full_page is writing back the dirty buffers under
1746  * the page lock, whoever dirtied the buffers may decide to clean them
1747  * again at any time.  We handle that by only looking at the buffer
1748  * state inside lock_buffer().
1749  *
1750  * If block_write_full_page() is called for regular writeback
1751  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1752  * locked buffer.   This only can happen if someone has written the buffer
1753  * directly, with submit_bh().  At the address_space level PageWriteback
1754  * prevents this contention from occurring.
1755  *
1756  * If block_write_full_page() is called with wbc->sync_mode ==
1757  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1758  * causes the writes to be flagged as synchronous writes.
1759  */
1760 int __block_write_full_folio(struct inode *inode, struct folio *folio,
1761 			get_block_t *get_block, struct writeback_control *wbc,
1762 			bh_end_io_t *handler)
1763 {
1764 	int err;
1765 	sector_t block;
1766 	sector_t last_block;
1767 	struct buffer_head *bh, *head;
1768 	unsigned int blocksize, bbits;
1769 	int nr_underway = 0;
1770 	blk_opf_t write_flags = wbc_to_write_flags(wbc);
1771 
1772 	head = folio_create_buffers(folio, inode,
1773 				    (1 << BH_Dirty) | (1 << BH_Uptodate));
1774 
1775 	/*
1776 	 * Be very careful.  We have no exclusion from block_dirty_folio
1777 	 * here, and the (potentially unmapped) buffers may become dirty at
1778 	 * any time.  If a buffer becomes dirty here after we've inspected it
1779 	 * then we just miss that fact, and the folio stays dirty.
1780 	 *
1781 	 * Buffers outside i_size may be dirtied by block_dirty_folio;
1782 	 * handle that here by just cleaning them.
1783 	 */
1784 
1785 	bh = head;
1786 	blocksize = bh->b_size;
1787 	bbits = block_size_bits(blocksize);
1788 
1789 	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
1790 	last_block = (i_size_read(inode) - 1) >> bbits;
1791 
1792 	/*
1793 	 * Get all the dirty buffers mapped to disk addresses and
1794 	 * handle any aliases from the underlying blockdev's mapping.
1795 	 */
1796 	do {
1797 		if (block > last_block) {
1798 			/*
1799 			 * mapped buffers outside i_size will occur, because
1800 			 * this folio can be outside i_size when there is a
1801 			 * truncate in progress.
1802 			 */
1803 			/*
1804 			 * The buffer was zeroed by block_write_full_page()
1805 			 */
1806 			clear_buffer_dirty(bh);
1807 			set_buffer_uptodate(bh);
1808 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1809 			   buffer_dirty(bh)) {
1810 			WARN_ON(bh->b_size != blocksize);
1811 			err = get_block(inode, block, bh, 1);
1812 			if (err)
1813 				goto recover;
1814 			clear_buffer_delay(bh);
1815 			if (buffer_new(bh)) {
1816 				/* blockdev mappings never come here */
1817 				clear_buffer_new(bh);
1818 				clean_bdev_bh_alias(bh);
1819 			}
1820 		}
1821 		bh = bh->b_this_page;
1822 		block++;
1823 	} while (bh != head);
1824 
1825 	do {
1826 		if (!buffer_mapped(bh))
1827 			continue;
1828 		/*
1829 		 * If it's a fully non-blocking write attempt and we cannot
1830 		 * lock the buffer then redirty the folio.  Note that this can
1831 		 * potentially cause a busy-wait loop from writeback threads
1832 		 * and kswapd activity, but those code paths have their own
1833 		 * higher-level throttling.
1834 		 */
1835 		if (wbc->sync_mode != WB_SYNC_NONE) {
1836 			lock_buffer(bh);
1837 		} else if (!trylock_buffer(bh)) {
1838 			folio_redirty_for_writepage(wbc, folio);
1839 			continue;
1840 		}
1841 		if (test_clear_buffer_dirty(bh)) {
1842 			mark_buffer_async_write_endio(bh, handler);
1843 		} else {
1844 			unlock_buffer(bh);
1845 		}
1846 	} while ((bh = bh->b_this_page) != head);
1847 
1848 	/*
1849 	 * The folio and its buffers are protected by the writeback flag,
1850 	 * so we can drop the bh refcounts early.
1851 	 */
1852 	BUG_ON(folio_test_writeback(folio));
1853 	folio_start_writeback(folio);
1854 
1855 	do {
1856 		struct buffer_head *next = bh->b_this_page;
1857 		if (buffer_async_write(bh)) {
1858 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1859 			nr_underway++;
1860 		}
1861 		bh = next;
1862 	} while (bh != head);
1863 	folio_unlock(folio);
1864 
1865 	err = 0;
1866 done:
1867 	if (nr_underway == 0) {
1868 		/*
1869 		 * The folio was marked dirty, but the buffers were
1870 		 * clean.  Someone wrote them back by hand with
1871 		 * write_dirty_buffer/submit_bh.  A rare case.
1872 		 */
1873 		folio_end_writeback(folio);
1874 
1875 		/*
1876 		 * The folio and buffer_heads can be released at any time from
1877 		 * here on.
1878 		 */
1879 	}
1880 	return err;
1881 
1882 recover:
1883 	/*
1884 	 * ENOSPC, or some other error.  We may already have added some
1885 	 * blocks to the file, so we need to write these out to avoid
1886 	 * exposing stale data.
1887 	 * The folio is currently locked and not marked for writeback
1888 	 */
1889 	bh = head;
1890 	/* Recovery: lock and submit the mapped buffers */
1891 	do {
1892 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1893 		    !buffer_delay(bh)) {
1894 			lock_buffer(bh);
1895 			mark_buffer_async_write_endio(bh, handler);
1896 		} else {
1897 			/*
1898 			 * The buffer may have been set dirty during
1899 			 * attachment to a dirty folio.
1900 			 */
1901 			clear_buffer_dirty(bh);
1902 		}
1903 	} while ((bh = bh->b_this_page) != head);
1904 	folio_set_error(folio);
1905 	BUG_ON(folio_test_writeback(folio));
1906 	mapping_set_error(folio->mapping, err);
1907 	folio_start_writeback(folio);
1908 	do {
1909 		struct buffer_head *next = bh->b_this_page;
1910 		if (buffer_async_write(bh)) {
1911 			clear_buffer_dirty(bh);
1912 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1913 			nr_underway++;
1914 		}
1915 		bh = next;
1916 	} while (bh != head);
1917 	folio_unlock(folio);
1918 	goto done;
1919 }
1920 EXPORT_SYMBOL(__block_write_full_folio);
1921 
1922 /*
1923  * If a folio has any new buffers, zero them out here, and mark them uptodate
1924  * and dirty so they'll be written out (in order to prevent uninitialised
1925  * block data from leaking). And clear the new bit.
1926  */
1927 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
1928 {
1929 	size_t block_start, block_end;
1930 	struct buffer_head *head, *bh;
1931 
1932 	BUG_ON(!folio_test_locked(folio));
1933 	head = folio_buffers(folio);
1934 	if (!head)
1935 		return;
1936 
1937 	bh = head;
1938 	block_start = 0;
1939 	do {
1940 		block_end = block_start + bh->b_size;
1941 
1942 		if (buffer_new(bh)) {
1943 			if (block_end > from && block_start < to) {
1944 				if (!folio_test_uptodate(folio)) {
1945 					size_t start, xend;
1946 
1947 					start = max(from, block_start);
1948 					xend = min(to, block_end);
1949 
1950 					folio_zero_segment(folio, start, xend);
1951 					set_buffer_uptodate(bh);
1952 				}
1953 
1954 				clear_buffer_new(bh);
1955 				mark_buffer_dirty(bh);
1956 			}
1957 		}
1958 
1959 		block_start = block_end;
1960 		bh = bh->b_this_page;
1961 	} while (bh != head);
1962 }
1963 EXPORT_SYMBOL(folio_zero_new_buffers);
1964 
1965 static void
1966 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
1967 		const struct iomap *iomap)
1968 {
1969 	loff_t offset = block << inode->i_blkbits;
1970 
1971 	bh->b_bdev = iomap->bdev;
1972 
1973 	/*
1974 	 * Block points to offset in file we need to map, iomap contains
1975 	 * the offset at which the map starts. If the map ends before the
1976 	 * current block, then do not map the buffer and let the caller
1977 	 * handle it.
1978 	 */
1979 	BUG_ON(offset >= iomap->offset + iomap->length);
1980 
1981 	switch (iomap->type) {
1982 	case IOMAP_HOLE:
1983 		/*
1984 		 * If the buffer is not up to date or beyond the current EOF,
1985 		 * we need to mark it as new to ensure sub-block zeroing is
1986 		 * executed if necessary.
1987 		 */
1988 		if (!buffer_uptodate(bh) ||
1989 		    (offset >= i_size_read(inode)))
1990 			set_buffer_new(bh);
1991 		break;
1992 	case IOMAP_DELALLOC:
1993 		if (!buffer_uptodate(bh) ||
1994 		    (offset >= i_size_read(inode)))
1995 			set_buffer_new(bh);
1996 		set_buffer_uptodate(bh);
1997 		set_buffer_mapped(bh);
1998 		set_buffer_delay(bh);
1999 		break;
2000 	case IOMAP_UNWRITTEN:
2001 		/*
2002 		 * For unwritten regions, we always need to ensure that regions
2003 		 * in the block we are not writing to are zeroed. Mark the
2004 		 * buffer as new to ensure this.
2005 		 */
2006 		set_buffer_new(bh);
2007 		set_buffer_unwritten(bh);
2008 		fallthrough;
2009 	case IOMAP_MAPPED:
2010 		if ((iomap->flags & IOMAP_F_NEW) ||
2011 		    offset >= i_size_read(inode))
2012 			set_buffer_new(bh);
2013 		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2014 				inode->i_blkbits;
2015 		set_buffer_mapped(bh);
2016 		break;
2017 	}
2018 }
2019 
2020 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2021 		get_block_t *get_block, const struct iomap *iomap)
2022 {
2023 	unsigned from = pos & (PAGE_SIZE - 1);
2024 	unsigned to = from + len;
2025 	struct inode *inode = folio->mapping->host;
2026 	unsigned block_start, block_end;
2027 	sector_t block;
2028 	int err = 0;
2029 	unsigned blocksize, bbits;
2030 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
2031 
2032 	BUG_ON(!folio_test_locked(folio));
2033 	BUG_ON(from > PAGE_SIZE);
2034 	BUG_ON(to > PAGE_SIZE);
2035 	BUG_ON(from > to);
2036 
2037 	head = folio_create_buffers(folio, inode, 0);
2038 	blocksize = head->b_size;
2039 	bbits = block_size_bits(blocksize);
2040 
2041 	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
2042 
2043 	for(bh = head, block_start = 0; bh != head || !block_start;
2044 	    block++, block_start=block_end, bh = bh->b_this_page) {
2045 		block_end = block_start + blocksize;
2046 		if (block_end <= from || block_start >= to) {
2047 			if (folio_test_uptodate(folio)) {
2048 				if (!buffer_uptodate(bh))
2049 					set_buffer_uptodate(bh);
2050 			}
2051 			continue;
2052 		}
2053 		if (buffer_new(bh))
2054 			clear_buffer_new(bh);
2055 		if (!buffer_mapped(bh)) {
2056 			WARN_ON(bh->b_size != blocksize);
2057 			if (get_block) {
2058 				err = get_block(inode, block, bh, 1);
2059 				if (err)
2060 					break;
2061 			} else {
2062 				iomap_to_bh(inode, block, bh, iomap);
2063 			}
2064 
2065 			if (buffer_new(bh)) {
2066 				clean_bdev_bh_alias(bh);
2067 				if (folio_test_uptodate(folio)) {
2068 					clear_buffer_new(bh);
2069 					set_buffer_uptodate(bh);
2070 					mark_buffer_dirty(bh);
2071 					continue;
2072 				}
2073 				if (block_end > to || block_start < from)
2074 					folio_zero_segments(folio,
2075 						to, block_end,
2076 						block_start, from);
2077 				continue;
2078 			}
2079 		}
2080 		if (folio_test_uptodate(folio)) {
2081 			if (!buffer_uptodate(bh))
2082 				set_buffer_uptodate(bh);
2083 			continue;
2084 		}
2085 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2086 		    !buffer_unwritten(bh) &&
2087 		     (block_start < from || block_end > to)) {
2088 			bh_read_nowait(bh, 0);
2089 			*wait_bh++=bh;
2090 		}
2091 	}
2092 	/*
2093 	 * If we issued read requests - let them complete.
2094 	 */
2095 	while(wait_bh > wait) {
2096 		wait_on_buffer(*--wait_bh);
2097 		if (!buffer_uptodate(*wait_bh))
2098 			err = -EIO;
2099 	}
2100 	if (unlikely(err))
2101 		folio_zero_new_buffers(folio, from, to);
2102 	return err;
2103 }
2104 
2105 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2106 		get_block_t *get_block)
2107 {
2108 	return __block_write_begin_int(page_folio(page), pos, len, get_block,
2109 				       NULL);
2110 }
2111 EXPORT_SYMBOL(__block_write_begin);
2112 
2113 static int __block_commit_write(struct inode *inode, struct folio *folio,
2114 		size_t from, size_t to)
2115 {
2116 	size_t block_start, block_end;
2117 	bool partial = false;
2118 	unsigned blocksize;
2119 	struct buffer_head *bh, *head;
2120 
2121 	bh = head = folio_buffers(folio);
2122 	blocksize = bh->b_size;
2123 
2124 	block_start = 0;
2125 	do {
2126 		block_end = block_start + blocksize;
2127 		if (block_end <= from || block_start >= to) {
2128 			if (!buffer_uptodate(bh))
2129 				partial = true;
2130 		} else {
2131 			set_buffer_uptodate(bh);
2132 			mark_buffer_dirty(bh);
2133 		}
2134 		if (buffer_new(bh))
2135 			clear_buffer_new(bh);
2136 
2137 		block_start = block_end;
2138 		bh = bh->b_this_page;
2139 	} while (bh != head);
2140 
2141 	/*
2142 	 * If this is a partial write which happened to make all buffers
2143 	 * uptodate then we can optimize away a bogus read_folio() for
2144 	 * the next read(). Here we 'discover' whether the folio went
2145 	 * uptodate as a result of this (potentially partial) write.
2146 	 */
2147 	if (!partial)
2148 		folio_mark_uptodate(folio);
2149 	return 0;
2150 }
2151 
2152 /*
2153  * block_write_begin takes care of the basic task of block allocation and
2154  * bringing partial write blocks uptodate first.
2155  *
2156  * The filesystem needs to handle block truncation upon failure.
2157  */
2158 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2159 		struct page **pagep, get_block_t *get_block)
2160 {
2161 	pgoff_t index = pos >> PAGE_SHIFT;
2162 	struct page *page;
2163 	int status;
2164 
2165 	page = grab_cache_page_write_begin(mapping, index);
2166 	if (!page)
2167 		return -ENOMEM;
2168 
2169 	status = __block_write_begin(page, pos, len, get_block);
2170 	if (unlikely(status)) {
2171 		unlock_page(page);
2172 		put_page(page);
2173 		page = NULL;
2174 	}
2175 
2176 	*pagep = page;
2177 	return status;
2178 }
2179 EXPORT_SYMBOL(block_write_begin);
2180 
2181 int block_write_end(struct file *file, struct address_space *mapping,
2182 			loff_t pos, unsigned len, unsigned copied,
2183 			struct page *page, void *fsdata)
2184 {
2185 	struct folio *folio = page_folio(page);
2186 	struct inode *inode = mapping->host;
2187 	size_t start = pos - folio_pos(folio);
2188 
2189 	if (unlikely(copied < len)) {
2190 		/*
2191 		 * The buffers that were written will now be uptodate, so
2192 		 * we don't have to worry about a read_folio reading them
2193 		 * and overwriting a partial write. However if we have
2194 		 * encountered a short write and only partially written
2195 		 * into a buffer, it will not be marked uptodate, so a
2196 		 * read_folio might come in and destroy our partial write.
2197 		 *
2198 		 * Do the simplest thing, and just treat any short write to a
2199 		 * non uptodate folio as a zero-length write, and force the
2200 		 * caller to redo the whole thing.
2201 		 */
2202 		if (!folio_test_uptodate(folio))
2203 			copied = 0;
2204 
2205 		folio_zero_new_buffers(folio, start+copied, start+len);
2206 	}
2207 	flush_dcache_folio(folio);
2208 
2209 	/* This could be a short (even 0-length) commit */
2210 	__block_commit_write(inode, folio, start, start + copied);
2211 
2212 	return copied;
2213 }
2214 EXPORT_SYMBOL(block_write_end);
2215 
2216 int generic_write_end(struct file *file, struct address_space *mapping,
2217 			loff_t pos, unsigned len, unsigned copied,
2218 			struct page *page, void *fsdata)
2219 {
2220 	struct inode *inode = mapping->host;
2221 	loff_t old_size = inode->i_size;
2222 	bool i_size_changed = false;
2223 
2224 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2225 
2226 	/*
2227 	 * No need to use i_size_read() here, the i_size cannot change under us
2228 	 * because we hold i_rwsem.
2229 	 *
2230 	 * But it's important to update i_size while still holding page lock:
2231 	 * page writeout could otherwise come in and zero beyond i_size.
2232 	 */
2233 	if (pos + copied > inode->i_size) {
2234 		i_size_write(inode, pos + copied);
2235 		i_size_changed = true;
2236 	}
2237 
2238 	unlock_page(page);
2239 	put_page(page);
2240 
2241 	if (old_size < pos)
2242 		pagecache_isize_extended(inode, old_size, pos);
2243 	/*
2244 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2245 	 * makes the holding time of page lock longer. Second, it forces lock
2246 	 * ordering of page lock and transaction start for journaling
2247 	 * filesystems.
2248 	 */
2249 	if (i_size_changed)
2250 		mark_inode_dirty(inode);
2251 	return copied;
2252 }
2253 EXPORT_SYMBOL(generic_write_end);
2254 
2255 /*
2256  * block_is_partially_uptodate checks whether buffers within a folio are
2257  * uptodate or not.
2258  *
2259  * Returns true if all buffers which correspond to the specified part
2260  * of the folio are uptodate.
2261  */
2262 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2263 {
2264 	unsigned block_start, block_end, blocksize;
2265 	unsigned to;
2266 	struct buffer_head *bh, *head;
2267 	bool ret = true;
2268 
2269 	head = folio_buffers(folio);
2270 	if (!head)
2271 		return false;
2272 	blocksize = head->b_size;
2273 	to = min_t(unsigned, folio_size(folio) - from, count);
2274 	to = from + to;
2275 	if (from < blocksize && to > folio_size(folio) - blocksize)
2276 		return false;
2277 
2278 	bh = head;
2279 	block_start = 0;
2280 	do {
2281 		block_end = block_start + blocksize;
2282 		if (block_end > from && block_start < to) {
2283 			if (!buffer_uptodate(bh)) {
2284 				ret = false;
2285 				break;
2286 			}
2287 			if (block_end >= to)
2288 				break;
2289 		}
2290 		block_start = block_end;
2291 		bh = bh->b_this_page;
2292 	} while (bh != head);
2293 
2294 	return ret;
2295 }
2296 EXPORT_SYMBOL(block_is_partially_uptodate);
2297 
2298 /*
2299  * Generic "read_folio" function for block devices that have the normal
2300  * get_block functionality. This is most of the block device filesystems.
2301  * Reads the folio asynchronously --- the unlock_buffer() and
2302  * set/clear_buffer_uptodate() functions propagate buffer state into the
2303  * folio once IO has completed.
2304  */
2305 int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2306 {
2307 	struct inode *inode = folio->mapping->host;
2308 	sector_t iblock, lblock;
2309 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2310 	unsigned int blocksize, bbits;
2311 	int nr, i;
2312 	int fully_mapped = 1;
2313 	bool page_error = false;
2314 	loff_t limit = i_size_read(inode);
2315 
2316 	/* This is needed for ext4. */
2317 	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2318 		limit = inode->i_sb->s_maxbytes;
2319 
2320 	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2321 
2322 	head = folio_create_buffers(folio, inode, 0);
2323 	blocksize = head->b_size;
2324 	bbits = block_size_bits(blocksize);
2325 
2326 	iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
2327 	lblock = (limit+blocksize-1) >> bbits;
2328 	bh = head;
2329 	nr = 0;
2330 	i = 0;
2331 
2332 	do {
2333 		if (buffer_uptodate(bh))
2334 			continue;
2335 
2336 		if (!buffer_mapped(bh)) {
2337 			int err = 0;
2338 
2339 			fully_mapped = 0;
2340 			if (iblock < lblock) {
2341 				WARN_ON(bh->b_size != blocksize);
2342 				err = get_block(inode, iblock, bh, 0);
2343 				if (err) {
2344 					folio_set_error(folio);
2345 					page_error = true;
2346 				}
2347 			}
2348 			if (!buffer_mapped(bh)) {
2349 				folio_zero_range(folio, i * blocksize,
2350 						blocksize);
2351 				if (!err)
2352 					set_buffer_uptodate(bh);
2353 				continue;
2354 			}
2355 			/*
2356 			 * get_block() might have updated the buffer
2357 			 * synchronously
2358 			 */
2359 			if (buffer_uptodate(bh))
2360 				continue;
2361 		}
2362 		arr[nr++] = bh;
2363 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2364 
2365 	if (fully_mapped)
2366 		folio_set_mappedtodisk(folio);
2367 
2368 	if (!nr) {
2369 		/*
2370 		 * All buffers are uptodate - we can set the folio uptodate
2371 		 * as well. But not if get_block() returned an error.
2372 		 */
2373 		if (!page_error)
2374 			folio_mark_uptodate(folio);
2375 		folio_unlock(folio);
2376 		return 0;
2377 	}
2378 
2379 	/* Stage two: lock the buffers */
2380 	for (i = 0; i < nr; i++) {
2381 		bh = arr[i];
2382 		lock_buffer(bh);
2383 		mark_buffer_async_read(bh);
2384 	}
2385 
2386 	/*
2387 	 * Stage 3: start the IO.  Check for uptodateness
2388 	 * inside the buffer lock in case another process reading
2389 	 * the underlying blockdev brought it uptodate (the sct fix).
2390 	 */
2391 	for (i = 0; i < nr; i++) {
2392 		bh = arr[i];
2393 		if (buffer_uptodate(bh))
2394 			end_buffer_async_read(bh, 1);
2395 		else
2396 			submit_bh(REQ_OP_READ, bh);
2397 	}
2398 	return 0;
2399 }
2400 EXPORT_SYMBOL(block_read_full_folio);
2401 
2402 /* utility function for filesystems that need to do work on expanding
2403  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2404  * deal with the hole.
2405  */
2406 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2407 {
2408 	struct address_space *mapping = inode->i_mapping;
2409 	const struct address_space_operations *aops = mapping->a_ops;
2410 	struct page *page;
2411 	void *fsdata = NULL;
2412 	int err;
2413 
2414 	err = inode_newsize_ok(inode, size);
2415 	if (err)
2416 		goto out;
2417 
2418 	err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
2419 	if (err)
2420 		goto out;
2421 
2422 	err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
2423 	BUG_ON(err > 0);
2424 
2425 out:
2426 	return err;
2427 }
2428 EXPORT_SYMBOL(generic_cont_expand_simple);
2429 
2430 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2431 			    loff_t pos, loff_t *bytes)
2432 {
2433 	struct inode *inode = mapping->host;
2434 	const struct address_space_operations *aops = mapping->a_ops;
2435 	unsigned int blocksize = i_blocksize(inode);
2436 	struct page *page;
2437 	void *fsdata = NULL;
2438 	pgoff_t index, curidx;
2439 	loff_t curpos;
2440 	unsigned zerofrom, offset, len;
2441 	int err = 0;
2442 
2443 	index = pos >> PAGE_SHIFT;
2444 	offset = pos & ~PAGE_MASK;
2445 
2446 	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2447 		zerofrom = curpos & ~PAGE_MASK;
2448 		if (zerofrom & (blocksize-1)) {
2449 			*bytes |= (blocksize-1);
2450 			(*bytes)++;
2451 		}
2452 		len = PAGE_SIZE - zerofrom;
2453 
2454 		err = aops->write_begin(file, mapping, curpos, len,
2455 					    &page, &fsdata);
2456 		if (err)
2457 			goto out;
2458 		zero_user(page, zerofrom, len);
2459 		err = aops->write_end(file, mapping, curpos, len, len,
2460 						page, fsdata);
2461 		if (err < 0)
2462 			goto out;
2463 		BUG_ON(err != len);
2464 		err = 0;
2465 
2466 		balance_dirty_pages_ratelimited(mapping);
2467 
2468 		if (fatal_signal_pending(current)) {
2469 			err = -EINTR;
2470 			goto out;
2471 		}
2472 	}
2473 
2474 	/* page covers the boundary, find the boundary offset */
2475 	if (index == curidx) {
2476 		zerofrom = curpos & ~PAGE_MASK;
2477 		/* if we will expand the thing last block will be filled */
2478 		if (offset <= zerofrom) {
2479 			goto out;
2480 		}
2481 		if (zerofrom & (blocksize-1)) {
2482 			*bytes |= (blocksize-1);
2483 			(*bytes)++;
2484 		}
2485 		len = offset - zerofrom;
2486 
2487 		err = aops->write_begin(file, mapping, curpos, len,
2488 					    &page, &fsdata);
2489 		if (err)
2490 			goto out;
2491 		zero_user(page, zerofrom, len);
2492 		err = aops->write_end(file, mapping, curpos, len, len,
2493 						page, fsdata);
2494 		if (err < 0)
2495 			goto out;
2496 		BUG_ON(err != len);
2497 		err = 0;
2498 	}
2499 out:
2500 	return err;
2501 }
2502 
2503 /*
2504  * For moronic filesystems that do not allow holes in file.
2505  * We may have to extend the file.
2506  */
2507 int cont_write_begin(struct file *file, struct address_space *mapping,
2508 			loff_t pos, unsigned len,
2509 			struct page **pagep, void **fsdata,
2510 			get_block_t *get_block, loff_t *bytes)
2511 {
2512 	struct inode *inode = mapping->host;
2513 	unsigned int blocksize = i_blocksize(inode);
2514 	unsigned int zerofrom;
2515 	int err;
2516 
2517 	err = cont_expand_zero(file, mapping, pos, bytes);
2518 	if (err)
2519 		return err;
2520 
2521 	zerofrom = *bytes & ~PAGE_MASK;
2522 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2523 		*bytes |= (blocksize-1);
2524 		(*bytes)++;
2525 	}
2526 
2527 	return block_write_begin(mapping, pos, len, pagep, get_block);
2528 }
2529 EXPORT_SYMBOL(cont_write_begin);
2530 
2531 int block_commit_write(struct page *page, unsigned from, unsigned to)
2532 {
2533 	struct folio *folio = page_folio(page);
2534 	struct inode *inode = folio->mapping->host;
2535 	__block_commit_write(inode, folio, from, to);
2536 	return 0;
2537 }
2538 EXPORT_SYMBOL(block_commit_write);
2539 
2540 /*
2541  * block_page_mkwrite() is not allowed to change the file size as it gets
2542  * called from a page fault handler when a page is first dirtied. Hence we must
2543  * be careful to check for EOF conditions here. We set the page up correctly
2544  * for a written page which means we get ENOSPC checking when writing into
2545  * holes and correct delalloc and unwritten extent mapping on filesystems that
2546  * support these features.
2547  *
2548  * We are not allowed to take the i_mutex here so we have to play games to
2549  * protect against truncate races as the page could now be beyond EOF.  Because
2550  * truncate writes the inode size before removing pages, once we have the
2551  * page lock we can determine safely if the page is beyond EOF. If it is not
2552  * beyond EOF, then the page is guaranteed safe against truncation until we
2553  * unlock the page.
2554  *
2555  * Direct callers of this function should protect against filesystem freezing
2556  * using sb_start_pagefault() - sb_end_pagefault() functions.
2557  */
2558 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2559 			 get_block_t get_block)
2560 {
2561 	struct folio *folio = page_folio(vmf->page);
2562 	struct inode *inode = file_inode(vma->vm_file);
2563 	unsigned long end;
2564 	loff_t size;
2565 	int ret;
2566 
2567 	folio_lock(folio);
2568 	size = i_size_read(inode);
2569 	if ((folio->mapping != inode->i_mapping) ||
2570 	    (folio_pos(folio) >= size)) {
2571 		/* We overload EFAULT to mean page got truncated */
2572 		ret = -EFAULT;
2573 		goto out_unlock;
2574 	}
2575 
2576 	end = folio_size(folio);
2577 	/* folio is wholly or partially inside EOF */
2578 	if (folio_pos(folio) + end > size)
2579 		end = size - folio_pos(folio);
2580 
2581 	ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2582 	if (!ret)
2583 		ret = __block_commit_write(inode, folio, 0, end);
2584 
2585 	if (unlikely(ret < 0))
2586 		goto out_unlock;
2587 	folio_mark_dirty(folio);
2588 	folio_wait_stable(folio);
2589 	return 0;
2590 out_unlock:
2591 	folio_unlock(folio);
2592 	return ret;
2593 }
2594 EXPORT_SYMBOL(block_page_mkwrite);
2595 
2596 int block_truncate_page(struct address_space *mapping,
2597 			loff_t from, get_block_t *get_block)
2598 {
2599 	pgoff_t index = from >> PAGE_SHIFT;
2600 	unsigned blocksize;
2601 	sector_t iblock;
2602 	size_t offset, length, pos;
2603 	struct inode *inode = mapping->host;
2604 	struct folio *folio;
2605 	struct buffer_head *bh;
2606 	int err = 0;
2607 
2608 	blocksize = i_blocksize(inode);
2609 	length = from & (blocksize - 1);
2610 
2611 	/* Block boundary? Nothing to do */
2612 	if (!length)
2613 		return 0;
2614 
2615 	length = blocksize - length;
2616 	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
2617 
2618 	folio = filemap_grab_folio(mapping, index);
2619 	if (IS_ERR(folio))
2620 		return PTR_ERR(folio);
2621 
2622 	bh = folio_buffers(folio);
2623 	if (!bh) {
2624 		folio_create_empty_buffers(folio, blocksize, 0);
2625 		bh = folio_buffers(folio);
2626 	}
2627 
2628 	/* Find the buffer that contains "offset" */
2629 	offset = offset_in_folio(folio, from);
2630 	pos = blocksize;
2631 	while (offset >= pos) {
2632 		bh = bh->b_this_page;
2633 		iblock++;
2634 		pos += blocksize;
2635 	}
2636 
2637 	if (!buffer_mapped(bh)) {
2638 		WARN_ON(bh->b_size != blocksize);
2639 		err = get_block(inode, iblock, bh, 0);
2640 		if (err)
2641 			goto unlock;
2642 		/* unmapped? It's a hole - nothing to do */
2643 		if (!buffer_mapped(bh))
2644 			goto unlock;
2645 	}
2646 
2647 	/* Ok, it's mapped. Make sure it's up-to-date */
2648 	if (folio_test_uptodate(folio))
2649 		set_buffer_uptodate(bh);
2650 
2651 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2652 		err = bh_read(bh, 0);
2653 		/* Uhhuh. Read error. Complain and punt. */
2654 		if (err < 0)
2655 			goto unlock;
2656 	}
2657 
2658 	folio_zero_range(folio, offset, length);
2659 	mark_buffer_dirty(bh);
2660 
2661 unlock:
2662 	folio_unlock(folio);
2663 	folio_put(folio);
2664 
2665 	return err;
2666 }
2667 EXPORT_SYMBOL(block_truncate_page);
2668 
2669 /*
2670  * The generic ->writepage function for buffer-backed address_spaces
2671  */
2672 int block_write_full_page(struct page *page, get_block_t *get_block,
2673 			struct writeback_control *wbc)
2674 {
2675 	struct folio *folio = page_folio(page);
2676 	struct inode * const inode = folio->mapping->host;
2677 	loff_t i_size = i_size_read(inode);
2678 
2679 	/* Is the folio fully inside i_size? */
2680 	if (folio_pos(folio) + folio_size(folio) <= i_size)
2681 		return __block_write_full_folio(inode, folio, get_block, wbc,
2682 					       end_buffer_async_write);
2683 
2684 	/* Is the folio fully outside i_size? (truncate in progress) */
2685 	if (folio_pos(folio) >= i_size) {
2686 		folio_unlock(folio);
2687 		return 0; /* don't care */
2688 	}
2689 
2690 	/*
2691 	 * The folio straddles i_size.  It must be zeroed out on each and every
2692 	 * writepage invocation because it may be mmapped.  "A file is mapped
2693 	 * in multiples of the page size.  For a file that is not a multiple of
2694 	 * the page size, the remaining memory is zeroed when mapped, and
2695 	 * writes to that region are not written out to the file."
2696 	 */
2697 	folio_zero_segment(folio, offset_in_folio(folio, i_size),
2698 			folio_size(folio));
2699 	return __block_write_full_folio(inode, folio, get_block, wbc,
2700 			end_buffer_async_write);
2701 }
2702 EXPORT_SYMBOL(block_write_full_page);
2703 
2704 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2705 			    get_block_t *get_block)
2706 {
2707 	struct inode *inode = mapping->host;
2708 	struct buffer_head tmp = {
2709 		.b_size = i_blocksize(inode),
2710 	};
2711 
2712 	get_block(inode, block, &tmp, 0);
2713 	return tmp.b_blocknr;
2714 }
2715 EXPORT_SYMBOL(generic_block_bmap);
2716 
2717 static void end_bio_bh_io_sync(struct bio *bio)
2718 {
2719 	struct buffer_head *bh = bio->bi_private;
2720 
2721 	if (unlikely(bio_flagged(bio, BIO_QUIET)))
2722 		set_bit(BH_Quiet, &bh->b_state);
2723 
2724 	bh->b_end_io(bh, !bio->bi_status);
2725 	bio_put(bio);
2726 }
2727 
2728 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2729 			  struct writeback_control *wbc)
2730 {
2731 	const enum req_op op = opf & REQ_OP_MASK;
2732 	struct bio *bio;
2733 
2734 	BUG_ON(!buffer_locked(bh));
2735 	BUG_ON(!buffer_mapped(bh));
2736 	BUG_ON(!bh->b_end_io);
2737 	BUG_ON(buffer_delay(bh));
2738 	BUG_ON(buffer_unwritten(bh));
2739 
2740 	/*
2741 	 * Only clear out a write error when rewriting
2742 	 */
2743 	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2744 		clear_buffer_write_io_error(bh);
2745 
2746 	if (buffer_meta(bh))
2747 		opf |= REQ_META;
2748 	if (buffer_prio(bh))
2749 		opf |= REQ_PRIO;
2750 
2751 	bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2752 
2753 	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2754 
2755 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2756 
2757 	__bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
2758 
2759 	bio->bi_end_io = end_bio_bh_io_sync;
2760 	bio->bi_private = bh;
2761 
2762 	/* Take care of bh's that straddle the end of the device */
2763 	guard_bio_eod(bio);
2764 
2765 	if (wbc) {
2766 		wbc_init_bio(wbc, bio);
2767 		wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2768 	}
2769 
2770 	submit_bio(bio);
2771 }
2772 
2773 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2774 {
2775 	submit_bh_wbc(opf, bh, NULL);
2776 }
2777 EXPORT_SYMBOL(submit_bh);
2778 
2779 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2780 {
2781 	lock_buffer(bh);
2782 	if (!test_clear_buffer_dirty(bh)) {
2783 		unlock_buffer(bh);
2784 		return;
2785 	}
2786 	bh->b_end_io = end_buffer_write_sync;
2787 	get_bh(bh);
2788 	submit_bh(REQ_OP_WRITE | op_flags, bh);
2789 }
2790 EXPORT_SYMBOL(write_dirty_buffer);
2791 
2792 /*
2793  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2794  * and then start new I/O and then wait upon it.  The caller must have a ref on
2795  * the buffer_head.
2796  */
2797 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2798 {
2799 	WARN_ON(atomic_read(&bh->b_count) < 1);
2800 	lock_buffer(bh);
2801 	if (test_clear_buffer_dirty(bh)) {
2802 		/*
2803 		 * The bh should be mapped, but it might not be if the
2804 		 * device was hot-removed. Not much we can do but fail the I/O.
2805 		 */
2806 		if (!buffer_mapped(bh)) {
2807 			unlock_buffer(bh);
2808 			return -EIO;
2809 		}
2810 
2811 		get_bh(bh);
2812 		bh->b_end_io = end_buffer_write_sync;
2813 		submit_bh(REQ_OP_WRITE | op_flags, bh);
2814 		wait_on_buffer(bh);
2815 		if (!buffer_uptodate(bh))
2816 			return -EIO;
2817 	} else {
2818 		unlock_buffer(bh);
2819 	}
2820 	return 0;
2821 }
2822 EXPORT_SYMBOL(__sync_dirty_buffer);
2823 
2824 int sync_dirty_buffer(struct buffer_head *bh)
2825 {
2826 	return __sync_dirty_buffer(bh, REQ_SYNC);
2827 }
2828 EXPORT_SYMBOL(sync_dirty_buffer);
2829 
2830 /*
2831  * try_to_free_buffers() checks if all the buffers on this particular folio
2832  * are unused, and releases them if so.
2833  *
2834  * Exclusion against try_to_free_buffers may be obtained by either
2835  * locking the folio or by holding its mapping's private_lock.
2836  *
2837  * If the folio is dirty but all the buffers are clean then we need to
2838  * be sure to mark the folio clean as well.  This is because the folio
2839  * may be against a block device, and a later reattachment of buffers
2840  * to a dirty folio will set *all* buffers dirty.  Which would corrupt
2841  * filesystem data on the same device.
2842  *
2843  * The same applies to regular filesystem folios: if all the buffers are
2844  * clean then we set the folio clean and proceed.  To do that, we require
2845  * total exclusion from block_dirty_folio().  That is obtained with
2846  * private_lock.
2847  *
2848  * try_to_free_buffers() is non-blocking.
2849  */
2850 static inline int buffer_busy(struct buffer_head *bh)
2851 {
2852 	return atomic_read(&bh->b_count) |
2853 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2854 }
2855 
2856 static bool
2857 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2858 {
2859 	struct buffer_head *head = folio_buffers(folio);
2860 	struct buffer_head *bh;
2861 
2862 	bh = head;
2863 	do {
2864 		if (buffer_busy(bh))
2865 			goto failed;
2866 		bh = bh->b_this_page;
2867 	} while (bh != head);
2868 
2869 	do {
2870 		struct buffer_head *next = bh->b_this_page;
2871 
2872 		if (bh->b_assoc_map)
2873 			__remove_assoc_queue(bh);
2874 		bh = next;
2875 	} while (bh != head);
2876 	*buffers_to_free = head;
2877 	folio_detach_private(folio);
2878 	return true;
2879 failed:
2880 	return false;
2881 }
2882 
2883 bool try_to_free_buffers(struct folio *folio)
2884 {
2885 	struct address_space * const mapping = folio->mapping;
2886 	struct buffer_head *buffers_to_free = NULL;
2887 	bool ret = 0;
2888 
2889 	BUG_ON(!folio_test_locked(folio));
2890 	if (folio_test_writeback(folio))
2891 		return false;
2892 
2893 	if (mapping == NULL) {		/* can this still happen? */
2894 		ret = drop_buffers(folio, &buffers_to_free);
2895 		goto out;
2896 	}
2897 
2898 	spin_lock(&mapping->private_lock);
2899 	ret = drop_buffers(folio, &buffers_to_free);
2900 
2901 	/*
2902 	 * If the filesystem writes its buffers by hand (eg ext3)
2903 	 * then we can have clean buffers against a dirty folio.  We
2904 	 * clean the folio here; otherwise the VM will never notice
2905 	 * that the filesystem did any IO at all.
2906 	 *
2907 	 * Also, during truncate, discard_buffer will have marked all
2908 	 * the folio's buffers clean.  We discover that here and clean
2909 	 * the folio also.
2910 	 *
2911 	 * private_lock must be held over this entire operation in order
2912 	 * to synchronise against block_dirty_folio and prevent the
2913 	 * dirty bit from being lost.
2914 	 */
2915 	if (ret)
2916 		folio_cancel_dirty(folio);
2917 	spin_unlock(&mapping->private_lock);
2918 out:
2919 	if (buffers_to_free) {
2920 		struct buffer_head *bh = buffers_to_free;
2921 
2922 		do {
2923 			struct buffer_head *next = bh->b_this_page;
2924 			free_buffer_head(bh);
2925 			bh = next;
2926 		} while (bh != buffers_to_free);
2927 	}
2928 	return ret;
2929 }
2930 EXPORT_SYMBOL(try_to_free_buffers);
2931 
2932 /*
2933  * Buffer-head allocation
2934  */
2935 static struct kmem_cache *bh_cachep __read_mostly;
2936 
2937 /*
2938  * Once the number of bh's in the machine exceeds this level, we start
2939  * stripping them in writeback.
2940  */
2941 static unsigned long max_buffer_heads;
2942 
2943 int buffer_heads_over_limit;
2944 
2945 struct bh_accounting {
2946 	int nr;			/* Number of live bh's */
2947 	int ratelimit;		/* Limit cacheline bouncing */
2948 };
2949 
2950 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2951 
2952 static void recalc_bh_state(void)
2953 {
2954 	int i;
2955 	int tot = 0;
2956 
2957 	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
2958 		return;
2959 	__this_cpu_write(bh_accounting.ratelimit, 0);
2960 	for_each_online_cpu(i)
2961 		tot += per_cpu(bh_accounting, i).nr;
2962 	buffer_heads_over_limit = (tot > max_buffer_heads);
2963 }
2964 
2965 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
2966 {
2967 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
2968 	if (ret) {
2969 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
2970 		spin_lock_init(&ret->b_uptodate_lock);
2971 		preempt_disable();
2972 		__this_cpu_inc(bh_accounting.nr);
2973 		recalc_bh_state();
2974 		preempt_enable();
2975 	}
2976 	return ret;
2977 }
2978 EXPORT_SYMBOL(alloc_buffer_head);
2979 
2980 void free_buffer_head(struct buffer_head *bh)
2981 {
2982 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
2983 	kmem_cache_free(bh_cachep, bh);
2984 	preempt_disable();
2985 	__this_cpu_dec(bh_accounting.nr);
2986 	recalc_bh_state();
2987 	preempt_enable();
2988 }
2989 EXPORT_SYMBOL(free_buffer_head);
2990 
2991 static int buffer_exit_cpu_dead(unsigned int cpu)
2992 {
2993 	int i;
2994 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
2995 
2996 	for (i = 0; i < BH_LRU_SIZE; i++) {
2997 		brelse(b->bhs[i]);
2998 		b->bhs[i] = NULL;
2999 	}
3000 	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3001 	per_cpu(bh_accounting, cpu).nr = 0;
3002 	return 0;
3003 }
3004 
3005 /**
3006  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3007  * @bh: struct buffer_head
3008  *
3009  * Return true if the buffer is up-to-date and false,
3010  * with the buffer locked, if not.
3011  */
3012 int bh_uptodate_or_lock(struct buffer_head *bh)
3013 {
3014 	if (!buffer_uptodate(bh)) {
3015 		lock_buffer(bh);
3016 		if (!buffer_uptodate(bh))
3017 			return 0;
3018 		unlock_buffer(bh);
3019 	}
3020 	return 1;
3021 }
3022 EXPORT_SYMBOL(bh_uptodate_or_lock);
3023 
3024 /**
3025  * __bh_read - Submit read for a locked buffer
3026  * @bh: struct buffer_head
3027  * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3028  * @wait: wait until reading finish
3029  *
3030  * Returns zero on success or don't wait, and -EIO on error.
3031  */
3032 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3033 {
3034 	int ret = 0;
3035 
3036 	BUG_ON(!buffer_locked(bh));
3037 
3038 	get_bh(bh);
3039 	bh->b_end_io = end_buffer_read_sync;
3040 	submit_bh(REQ_OP_READ | op_flags, bh);
3041 	if (wait) {
3042 		wait_on_buffer(bh);
3043 		if (!buffer_uptodate(bh))
3044 			ret = -EIO;
3045 	}
3046 	return ret;
3047 }
3048 EXPORT_SYMBOL(__bh_read);
3049 
3050 /**
3051  * __bh_read_batch - Submit read for a batch of unlocked buffers
3052  * @nr: entry number of the buffer batch
3053  * @bhs: a batch of struct buffer_head
3054  * @op_flags: appending REQ_OP_* flags besides REQ_OP_READ
3055  * @force_lock: force to get a lock on the buffer if set, otherwise drops any
3056  *              buffer that cannot lock.
3057  *
3058  * Returns zero on success or don't wait, and -EIO on error.
3059  */
3060 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3061 		     blk_opf_t op_flags, bool force_lock)
3062 {
3063 	int i;
3064 
3065 	for (i = 0; i < nr; i++) {
3066 		struct buffer_head *bh = bhs[i];
3067 
3068 		if (buffer_uptodate(bh))
3069 			continue;
3070 
3071 		if (force_lock)
3072 			lock_buffer(bh);
3073 		else
3074 			if (!trylock_buffer(bh))
3075 				continue;
3076 
3077 		if (buffer_uptodate(bh)) {
3078 			unlock_buffer(bh);
3079 			continue;
3080 		}
3081 
3082 		bh->b_end_io = end_buffer_read_sync;
3083 		get_bh(bh);
3084 		submit_bh(REQ_OP_READ | op_flags, bh);
3085 	}
3086 }
3087 EXPORT_SYMBOL(__bh_read_batch);
3088 
3089 void __init buffer_init(void)
3090 {
3091 	unsigned long nrpages;
3092 	int ret;
3093 
3094 	bh_cachep = kmem_cache_create("buffer_head",
3095 			sizeof(struct buffer_head), 0,
3096 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3097 				SLAB_MEM_SPREAD),
3098 				NULL);
3099 
3100 	/*
3101 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3102 	 */
3103 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3104 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3105 	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3106 					NULL, buffer_exit_cpu_dead);
3107 	WARN_ON(ret < 0);
3108 }
3109