// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the folio has dirty or writeback buffers. If all the
 * buffers are unlocked and clean then the folio_test_dirty information is
 * stale. If any of the buffers are locked, it is assumed they are locked
 * for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				     bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, private_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock.
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* We might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between file I/O on
	 * the block device and getblk.  It gets dealt with elsewhere;
	 * don't report an error if we had some unmapped buffers.
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	put_page(page);
out:
	return ret;
}

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;
	int folio_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
		folio_set_error(folio);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			folio_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	/*
	 * If all of the buffers are uptodate then we can set the page
	 * uptodate.
	 */
	if (folio_uptodate)
		folio_mark_uptodate(folio);
	folio_unlock(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

struct postprocess_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	bool valid;

	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
	end_buffer_async_read(bh, valid);
	kfree(ctx);
}

static bool need_fsverity(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	struct inode *inode = folio->mapping->host;

	return fsverity_active(inode) &&
		/* needed by ext4 */
		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void decrypt_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
					       bh_offset(bh));
	if (err == 0 && need_fsverity(bh)) {
		/*
		 * We use different work queues for decryption and for verity
		 * because verity may require reading metadata pages that need
		 * decryption, and we shouldn't recurse to the same workqueue.
		 */
		INIT_WORK(&ctx->work, verify_bh);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}

/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_folio->mapping->host;
	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
	bool verify = need_fsverity(bh);

	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
	if (uptodate && (decrypt || verify)) {
		struct postprocess_bh_ctx *ctx =
			kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		folio_set_error(folio);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */
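
/*
 * A minimal, hypothetical sketch of how the helpers described above
 * combine in a filesystem's fsync path (not lifted from any real
 * filesystem; error handling elided):
 *
 *	// while updating metadata for @inode:
 *	mark_buffer_dirty_inode(bh, inode);	// queues bh on ->private_list
 *
 *	// later, in ->fsync():
 *	err = sync_mapping_buffers(inode->i_mapping);
 *
 * sync_mapping_buffers() writes out and waits upon everything previously
 * queued via mark_buffer_dirty_inode().
 */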

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_buffers_list to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
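
/*
 * A hypothetical sketch of the O_SYNC pattern described above (lock and
 * list stand in for a real associated-buffer list and its spinlock):
 *
 *	write_dirty_buffer(bh, REQ_SYNC);	// queue as each bh is dirtied
 *	...
 *	err = osync_buffers_list(lock, list);	// then wait for completion
 */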

void emergency_thaw_bdev(struct super_block *sb)
{
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
		printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_folio->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->private_data) {
		mapping->private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	folio_memcg_lock(folio);
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	folio_memcg_unlock(folio);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);
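
/*
 * A hypothetical sketch of how a buffer-backed filesystem wires the folio
 * helpers in this file into its address_space_operations (illustrative
 * only; real filesystems supply many more methods):
 *
 *	static const struct address_space_operations example_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *	};
 */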

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;

	INIT_LIST_HEAD(&tmp);
	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a folio for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					bool retry)
{
	struct buffer_head *bh, *head;
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
	long offset;
	struct mem_cgroup *memcg, *old_memcg;

	if (retry)
		gfp |= __GFP_NOFAIL;

	/* The folio lock pins the memcg */
	memcg = folio_memcg(folio);
	old_memcg = set_active_memcg(memcg);

	head = NULL;
	offset = folio_size(folio);
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its folio */
		folio_set_bh(bh, folio, offset);
	}
out:
	set_active_memcg(old_memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
				       bool retry)
{
	return folio_alloc_buffers(page_folio(page), size, retry);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_private(page, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = bdev_nr_bytes(bdev);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);
		retval = (sz >> sizebits);
	}
	return retval;
}
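
/*
 * Illustrative arithmetic only: for a 1 TiB device with 4096-byte blocks,
 * blkdev_max_block() returns 2^40 >> 12 == 268435456, i.e. the number of
 * whole blocks that fit on the device.
 */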

/*
 * Initialise the state of a blockdev page's buffers.
 */
static sector_t
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);
	sector_t end_block = blkdev_max_block(bdev, size);

	do {
		if (!buffer_mapped(bh)) {
			bh->b_end_io = NULL;
			bh->b_private = NULL;
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static int
grow_dev_page(struct block_device *bdev, sector_t block,
	      pgoff_t index, int size, int sizebits, gfp_t gfp)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;
	sector_t end_block;
	int ret = 0;
	gfp_t gfp_mask;

	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;

	/*
	 * XXX: __getblk_slow() can not really deal with failure and
	 * will endlessly loop on improvised global reclaim.  Prefer
	 * looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp_mask |= __GFP_NOFAIL;

	page = find_or_create_page(inode->i_mapping, index, gfp_mask);

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			end_block = init_page_buffers(page, bdev,
						(sector_t)index << sizebits,
						size);
			goto done;
		}
		if (!try_to_free_buffers(page_folio(page)))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, true);

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
			size);
	spin_unlock(&inode->i_mapping->private_lock);
done:
	ret = (block < end_block) ? 1 : -ENXIO;
failed:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
{
	pgoff_t index;
	int sizebits;

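	/*
	 * A worked example of the index math below (assuming 4 KiB pages,
	 * PAGE_SHIFT == 12): for size == 1024, __ffs(size) == 10, so
	 * sizebits == 2 and each page holds 4 blocks; block 103 then
	 * lives in page index 103 >> 2 == 25.
	 */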
	sizebits = PAGE_SHIFT - __ffs(size);
	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %pg\n",
			__func__, (unsigned long long)block,
			bdev);
		return -EIO;
	}

	/* Create a page with the proper size buffers. */
	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size, gfp);
		if (ret < 0)
			return NULL;
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_folio() against that folio will discover all the uptodate
 * buffers, will set the folio uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->private_lock,
 * i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct folio *folio = bh->b_folio;
		struct address_space *mapping = NULL;

		folio_memcg_lock(folio);
		if (!folio_test_set_dirty(folio)) {
			mapping = folio->mapping;
			if (mapping)
				__folio_mark_dirty(folio, mapping, 0);
		}
		folio_memcg_unlock(folio);
		if (mapping)
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);
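
/*
 * A hedged usage sketch (not from any particular filesystem): the common
 * read-modify-write pattern around mark_buffer_dirty() looks like
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *	if (!bh)
 *		return -EIO;
 *	lock_buffer(bh);
 *	memset(bh->b_data + off, 0, len);	// modify the block
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);			// schedule for writeback
 *	brelse(bh);
 */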

void mark_buffer_write_io_error(struct buffer_head *bh)
{
	struct super_block *sb;

	set_buffer_write_io_error(bh);
	/* FIXME: do we need to set this in both places? */
	if (bh->b_folio && bh->b_folio->mapping)
		mapping_set_error(bh->b_folio->mapping, -EIO);
	if (bh->b_assoc_map)
		mapping_set_error(bh->b_assoc_map, -EIO);
	rcu_read_lock();
	sb = READ_ONCE(bh->b_bdev->bd_super);
	if (sb)
		errseq_set(&sb->s_wb_err, -EIO);
	rcu_read_unlock();
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_folio->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPUs' LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	16

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back if any is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = bh;
	struct bh_lru *b;
	int i;

	check_irqs_on();
	bh_lru_lock();

	/*
	 * The refcount of a buffer_head in the bh_lru prevents dropping the
	 * attached page (i.e., try_to_free_buffers) and so could cause page
	 * migration to fail.
	 * Skip putting the upcoming bh into the bh_lru until migration is done.
	 */
	if (lru_cache_disabled()) {
		bh_lru_unlock();
		return;
	}

	b = this_cpu_ptr(&bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		swap(evictee, b->bhs[i]);
		if (evictee == bh) {
			bh_lru_unlock();
			return;
		}
	}

	get_bh(bh);
	bh_lru_unlock();
	brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
		    bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		/* __find_get_block_slow will mark the page accessed */
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	} else
		touch_buffer(bh);

	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk_gfp() will lock up the machine if grow_dev_page's
 * try_to_free_buffers() attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk_gfp(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size, gfp);
	return bh;
}
EXPORT_SYMBOL(__getblk_gfp);
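
/*
 * A hypothetical sketch: a getblk-style lookup (e.g. via the sb_getblk()
 * wrapper) is the right call when the caller will overwrite the whole
 * block and so does not care about its current contents:
 *
 *	bh = sb_getblk(sb, block);
 *	lock_buffer(bh);
 *	memcpy(bh->b_data, src, bh->b_size);	// fill the entire block
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */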

/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		bh_readahead(bh, REQ_RAHEAD);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread_gfp() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *  @gfp: page allocation flag
 *
 *  Reads a specified block, and returns the buffer head that contains it.
 *  If @gfp is zero, the page cache is allocated from the non-movable area
 *  so that it does not hinder page migration.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread_gfp(struct block_device *bdev, sector_t block,
		   unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread_gfp);
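
/*
 * A hedged usage sketch (error handling minimal), reading a block for
 * inspection via the __bread() wrapper from buffer_head.h:
 *
 *	struct buffer_head *bh = __bread(bdev, block, size);
 *	if (!bh)
 *		return -EIO;		// the block was unreadable
 *	// ... examine bh->b_data ...
 *	brelse(bh);
 */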

static void __invalidate_bh_lrus(struct bh_lru *b)
{
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
}
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);

	__invalidate_bh_lrus(b);
	put_cpu_var(bh_lrus);
}

bool has_bh_in_lru(int cpu, void *dummy)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i])
			return true;
	}

	return false;
}

void invalidate_bh_lrus(void)
{
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

/*
 * It's called from workqueue context so we need a bh_lru_lock to close
 * the race with preemption/irq.
 */
void invalidate_bh_lrus_cpu(void)
{
	struct bh_lru *b;

	bh_lru_lock();
	b = this_cpu_ptr(&bh_lrus);
	__invalidate_bh_lrus(b);
	bh_lru_unlock();
}

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset)
{
	bh->b_folio = folio;
	BUG_ON(offset >= folio_size(folio));
	if (folio_test_highmem(folio))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = folio_address(folio) + offset;
}
EXPORT_SYMBOL(folio_set_bh);

/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
	 1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head * bh)
{
	unsigned long b_state;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
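	/*
	 * Atomically clear the BUFFER_FLAGS_DISCARD bits, retrying if
	 * another CPU changes ->b_state under us.
	 */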
	b_state = READ_ONCE(bh->b_state);
	do {
	} while (!try_cmpxchg(&bh->b_state, &b_state,
			      b_state & ~BUFFER_FLAGS_DISCARD));
	unlock_buffer(bh);
}

/**
 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidate_folio() is called when all or part of the folio has been
 * invalidated by a truncate operation.
 *
 * block_invalidate_folio() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct buffer_head *head, *bh, *next;
	size_t curr_off = 0;
	size_t stop = length + offset;

	BUG_ON(!folio_test_locked(folio));

	/*
	 * Check for overflow
	 */
	BUG_ON(stop > folio_size(folio) || stop < length);

	head = folio_buffers(folio);
	if (!head)
		return;

	bh = head;
	do {
		size_t next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * Are we still fully in range?
		 */
		if (next_off > stop)
			goto out;

		/*
		 * Is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire folio is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (length == folio_size(folio))
		filemap_release_folio(folio, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidate_folio);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * block_dirty_folio() via private_lock.  try_to_free_buffers
 * is already excluded via the folio lock.
 */
void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
				unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = folio_alloc_buffers(folio, blocksize, true);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&folio->mapping->private_lock);
	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
		bh = head;
		do {
			if (folio_test_dirty(folio))
				set_buffer_dirty(bh);
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	folio_attach_private(folio, head);
	spin_unlock(&folio->mapping->private_lock);
}
EXPORT_SYMBOL(folio_create_empty_buffers);

void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	folio_create_empty_buffers(page_folio(page), blocksize, b_state);
}
EXPORT_SYMBOL(create_empty_buffers);

/**
 * clean_bdev_aliases: clean a range of buffers in a block device
 * @bdev: Block device to clean buffers in
 * @block: Start of a range of blocks to clean
 * @len: Number of blocks to clean
 *
 * We are taking a range of blocks for data and we don't want writeback of any
 * buffer-cache aliases starting from the return of this function until the
 * moment when something explicitly marks the buffer dirty (hopefully that
 * will not happen until we free that block ;-) We don't even need to mark
 * it not-uptodate - nobody can expect anything from a newly allocated buffer
 * anyway. We used to use unmap_buffer() for such invalidation, but that was
 * wrong. We definitely don't want to mark the alias unmapped, for example - it
 * would confuse anyone who might pick it up with bread() afterwards...
 *
 * Also, note that bforget() doesn't lock the buffer.  So there can be
 * writeout I/O going on against recently-freed buffers.  We don't wait on that
 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
 * need to.  That happens here.
 */
void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct folio_batch fbatch;
	pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
	pgoff_t end;
	int i, count;
	struct buffer_head *bh;
	struct buffer_head *head;

	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
	folio_batch_init(&fbatch);
	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
		count = folio_batch_count(&fbatch);
		for (i = 0; i < count; i++) {
			struct folio *folio = fbatch.folios[i];

			if (!folio_buffers(folio))
				continue;
			/*
			 * We use the folio lock instead of
			 * bd_mapping->private_lock to pin buffers here since
			 * we can afford to sleep and it scales better than a
			 * global spinlock.
			 */
			folio_lock(folio);
			/* Recheck when the folio is locked which pins bhs */
			head = folio_buffers(folio);
			if (!head)
				goto unlock_page;
			bh = head;
			do {
				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
					goto next;
				if (bh->b_blocknr >= block + len)
					break;
				clear_buffer_dirty(bh);
				wait_on_buffer(bh);
				clear_buffer_req(bh);
next:
				bh = bh->b_this_page;
			} while (bh != head);
unlock_page:
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
		/* End of range already reached? */
		if (index > end || !index)
			break;
	}
}
EXPORT_SYMBOL(clean_bdev_aliases);

/*
 * Size is a power-of-two in the range 512..PAGE_SIZE,
 * and the case we care about most is PAGE_SIZE.
 *
 * So this *could* possibly be written with those
 * constraints in mind (relevant mostly if some
 * architecture has a slow bit-scan instruction)
 */
static inline int block_size_bits(unsigned int blocksize)
{
	return ilog2(blocksize);
}

static struct buffer_head *folio_create_buffers(struct folio *folio,
						struct inode *inode,
						unsigned int b_state)
{
	BUG_ON(!folio_test_locked(folio));

	if (!folio_buffers(folio))
		folio_create_empty_buffers(folio,
					   1 << READ_ONCE(inode->i_blkbits),
					   b_state);
	return folio_buffers(folio);
}

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

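/*
 * A hypothetical get_block_t sketch illustrating the state table above;
 * example_lookup() stands in for the filesystem's own block mapping:
 *
 *	static int example_get_block(struct inode *inode, sector_t iblock,
 *				     struct buffer_head *bh_result, int create)
 *	{
 *		sector_t phys = example_lookup(inode, iblock);
 *
 *		if (phys)
 *			map_bh(bh_result, inode->i_sb, phys);	// mapped
 *		// else: left unmapped - a hole (or allocate if @create)
 *		return 0;
 *	}
 */
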
1750 /*
1751  * While block_write_full_page is writing back the dirty buffers under
1752  * the page lock, whoever dirtied the buffers may decide to clean them
1753  * again at any time.  We handle that by only looking at the buffer
1754  * state inside lock_buffer().
1755  *
1756  * If block_write_full_page() is called for regular writeback
1757  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1758  * locked buffer.   This only can happen if someone has written the buffer
1759  * directly, with submit_bh().  At the address_space level PageWriteback
1760  * prevents this contention from occurring.
1761  *
1762  * If block_write_full_page() is called with wbc->sync_mode ==
1763  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1764  * causes the writes to be flagged as synchronous writes.
1765  */
1766 int __block_write_full_page(struct inode *inode, struct page *page,
1767 			get_block_t *get_block, struct writeback_control *wbc,
1768 			bh_end_io_t *handler)
1769 {
1770 	int err;
1771 	sector_t block;
1772 	sector_t last_block;
1773 	struct buffer_head *bh, *head;
1774 	unsigned int blocksize, bbits;
1775 	int nr_underway = 0;
1776 	blk_opf_t write_flags = wbc_to_write_flags(wbc);
1777 
1778 	head = folio_create_buffers(page_folio(page), inode,
1779 				    (1 << BH_Dirty) | (1 << BH_Uptodate));
1780 
1781 	/*
1782 	 * Be very careful.  We have no exclusion from block_dirty_folio
1783 	 * here, and the (potentially unmapped) buffers may become dirty at
1784 	 * any time.  If a buffer becomes dirty here after we've inspected it
1785 	 * then we just miss that fact, and the page stays dirty.
1786 	 *
1787 	 * Buffers outside i_size may be dirtied by block_dirty_folio;
1788 	 * handle that here by just cleaning them.
1789 	 */
1790 
1791 	bh = head;
1792 	blocksize = bh->b_size;
1793 	bbits = block_size_bits(blocksize);
1794 
1795 	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1796 	last_block = (i_size_read(inode) - 1) >> bbits;
1797 
1798 	/*
1799 	 * Get all the dirty buffers mapped to disk addresses and
1800 	 * handle any aliases from the underlying blockdev's mapping.
1801 	 */
1802 	do {
1803 		if (block > last_block) {
1804 			/*
1805 			 * Mapped buffers outside i_size will occur, because
1806 			 * this page can be outside i_size when there is a
1807 			 * truncate in progress.  The buffer was zeroed by
1808 			 * block_write_full_page().
1809 			 */
1812 			clear_buffer_dirty(bh);
1813 			set_buffer_uptodate(bh);
1814 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1815 			   buffer_dirty(bh)) {
1816 			WARN_ON(bh->b_size != blocksize);
1817 			err = get_block(inode, block, bh, 1);
1818 			if (err)
1819 				goto recover;
1820 			clear_buffer_delay(bh);
1821 			if (buffer_new(bh)) {
1822 				/* blockdev mappings never come here */
1823 				clear_buffer_new(bh);
1824 				clean_bdev_bh_alias(bh);
1825 			}
1826 		}
1827 		bh = bh->b_this_page;
1828 		block++;
1829 	} while (bh != head);
1830 
1831 	do {
1832 		if (!buffer_mapped(bh))
1833 			continue;
1834 		/*
1835 		 * If it's a fully non-blocking write attempt and we cannot
1836 		 * lock the buffer then redirty the page.  Note that this can
1837 		 * potentially cause a busy-wait loop from writeback threads
1838 		 * and kswapd activity, but those code paths have their own
1839 		 * higher-level throttling.
1840 		 */
1841 		if (wbc->sync_mode != WB_SYNC_NONE) {
1842 			lock_buffer(bh);
1843 		} else if (!trylock_buffer(bh)) {
1844 			redirty_page_for_writepage(wbc, page);
1845 			continue;
1846 		}
1847 		if (test_clear_buffer_dirty(bh)) {
1848 			mark_buffer_async_write_endio(bh, handler);
1849 		} else {
1850 			unlock_buffer(bh);
1851 		}
1852 	} while ((bh = bh->b_this_page) != head);
1853 
1854 	/*
1855 	 * The page and its buffers are protected by PageWriteback(), so we can
1856 	 * drop the bh refcounts early.
1857 	 */
1858 	BUG_ON(PageWriteback(page));
1859 	set_page_writeback(page);
1860 
1861 	do {
1862 		struct buffer_head *next = bh->b_this_page;
1863 		if (buffer_async_write(bh)) {
1864 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1865 			nr_underway++;
1866 		}
1867 		bh = next;
1868 	} while (bh != head);
1869 	unlock_page(page);
1870 
1871 	err = 0;
1872 done:
1873 	if (nr_underway == 0) {
1874 		/*
1875 		 * The page was marked dirty, but the buffers were
1876 		 * clean.  Someone wrote them back by hand with
1877 		 * write_dirty_buffer/submit_bh.  A rare case.
1878 		 */
1879 		end_page_writeback(page);
1880 
1881 		/*
1882 		 * The page and buffer_heads can be released at any time from
1883 		 * here on.
1884 		 */
1885 	}
1886 	return err;
1887 
1888 recover:
1889 	/*
1890 	 * ENOSPC, or some other error.  We may already have added some
1891 	 * blocks to the file, so we need to write these out to avoid
1892 	 * exposing stale data.
1893 	 * The page is currently locked and not marked for writeback.
1894 	 */
1895 	bh = head;
1896 	/* Recovery: lock and submit the mapped buffers */
1897 	do {
1898 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1899 		    !buffer_delay(bh)) {
1900 			lock_buffer(bh);
1901 			mark_buffer_async_write_endio(bh, handler);
1902 		} else {
1903 			/*
1904 			 * The buffer may have been set dirty during
1905 			 * attachment to a dirty page.
1906 			 */
1907 			clear_buffer_dirty(bh);
1908 		}
1909 	} while ((bh = bh->b_this_page) != head);
1910 	SetPageError(page);
1911 	BUG_ON(PageWriteback(page));
1912 	mapping_set_error(page->mapping, err);
1913 	set_page_writeback(page);
1914 	do {
1915 		struct buffer_head *next = bh->b_this_page;
1916 		if (buffer_async_write(bh)) {
1917 			clear_buffer_dirty(bh);
1918 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1919 			nr_underway++;
1920 		}
1921 		bh = next;
1922 	} while (bh != head);
1923 	unlock_page(page);
1924 	goto done;
1925 }
1926 EXPORT_SYMBOL(__block_write_full_page);
1927 
1928 /*
1929  * If a page has any new buffers, zero them out here, and mark them uptodate
1930  * and dirty so they'll be written out (in order to prevent uninitialised
1931  * block data from leaking), and clear the new bit.
1932  */
1933 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1934 {
1935 	unsigned int block_start, block_end;
1936 	struct buffer_head *head, *bh;
1937 
1938 	BUG_ON(!PageLocked(page));
1939 	if (!page_has_buffers(page))
1940 		return;
1941 
1942 	bh = head = page_buffers(page);
1943 	block_start = 0;
1944 	do {
1945 		block_end = block_start + bh->b_size;
1946 
1947 		if (buffer_new(bh)) {
1948 			if (block_end > from && block_start < to) {
1949 				if (!PageUptodate(page)) {
1950 					unsigned start, size;
1951 
1952 					start = max(from, block_start);
1953 					size = min(to, block_end) - start;
1954 
1955 					zero_user(page, start, size);
1956 					set_buffer_uptodate(bh);
1957 				}
1958 
1959 				clear_buffer_new(bh);
1960 				mark_buffer_dirty(bh);
1961 			}
1962 		}
1963 
1964 		block_start = block_end;
1965 		bh = bh->b_this_page;
1966 	} while (bh != head);
1967 }
1968 EXPORT_SYMBOL(page_zero_new_buffers);
1969 
1970 static void
1971 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
1972 		const struct iomap *iomap)
1973 {
1974 	loff_t offset = block << inode->i_blkbits;
1975 
1976 	bh->b_bdev = iomap->bdev;
1977 
1978 	/*
1979 	 * "block" points to the offset in the file we need to map; iomap
1980 	 * contains the offset at which the map starts. If the map ends before the
1981 	 * current block, then do not map the buffer and let the caller
1982 	 * handle it.
1983 	 */
1984 	BUG_ON(offset >= iomap->offset + iomap->length);
1985 
1986 	switch (iomap->type) {
1987 	case IOMAP_HOLE:
1988 		/*
1989 		 * If the buffer is not up to date or beyond the current EOF,
1990 		 * we need to mark it as new to ensure sub-block zeroing is
1991 		 * executed if necessary.
1992 		 */
1993 		if (!buffer_uptodate(bh) ||
1994 		    (offset >= i_size_read(inode)))
1995 			set_buffer_new(bh);
1996 		break;
1997 	case IOMAP_DELALLOC:
1998 		if (!buffer_uptodate(bh) ||
1999 		    (offset >= i_size_read(inode)))
2000 			set_buffer_new(bh);
2001 		set_buffer_uptodate(bh);
2002 		set_buffer_mapped(bh);
2003 		set_buffer_delay(bh);
2004 		break;
2005 	case IOMAP_UNWRITTEN:
2006 		/*
2007 		 * For unwritten regions, we always need to ensure that regions
2008 		 * in the block we are not writing to are zeroed. Mark the
2009 		 * buffer as new to ensure this.
2010 		 */
2011 		set_buffer_new(bh);
2012 		set_buffer_unwritten(bh);
2013 		fallthrough;
2014 	case IOMAP_MAPPED:
2015 		if ((iomap->flags & IOMAP_F_NEW) ||
2016 		    offset >= i_size_read(inode))
2017 			set_buffer_new(bh);
2018 		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2019 				inode->i_blkbits;
2020 		set_buffer_mapped(bh);
2021 		break;
2022 	}
2023 }
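
/*
 * Worked example (illustrative, with made-up numbers): for a 4096-byte
 * block size (i_blkbits == 12), an IOMAP_MAPPED extent with
 * iomap->offset == 0x10000 and iomap->addr == 0x400000, and a request
 * at file offset 0x12000:
 *
 *	b_blocknr = (0x400000 + 0x12000 - 0x10000) >> 12 = 0x402
 *
 * i.e. two blocks into the extent on disk.
 */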
2024 
2025 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2026 		get_block_t *get_block, const struct iomap *iomap)
2027 {
2028 	unsigned from = pos & (PAGE_SIZE - 1);
2029 	unsigned to = from + len;
2030 	struct inode *inode = folio->mapping->host;
2031 	unsigned block_start, block_end;
2032 	sector_t block;
2033 	int err = 0;
2034 	unsigned blocksize, bbits;
2035 	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
2036 
2037 	BUG_ON(!folio_test_locked(folio));
2038 	BUG_ON(from > PAGE_SIZE);
2039 	BUG_ON(to > PAGE_SIZE);
2040 	BUG_ON(from > to);
2041 
2042 	head = folio_create_buffers(folio, inode, 0);
2043 	blocksize = head->b_size;
2044 	bbits = block_size_bits(blocksize);
2045 
2046 	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
2047 
2048 	for (bh = head, block_start = 0; bh != head || !block_start;
2049 	    block++, block_start = block_end, bh = bh->b_this_page) {
2050 		block_end = block_start + blocksize;
2051 		if (block_end <= from || block_start >= to) {
2052 			if (folio_test_uptodate(folio)) {
2053 				if (!buffer_uptodate(bh))
2054 					set_buffer_uptodate(bh);
2055 			}
2056 			continue;
2057 		}
2058 		if (buffer_new(bh))
2059 			clear_buffer_new(bh);
2060 		if (!buffer_mapped(bh)) {
2061 			WARN_ON(bh->b_size != blocksize);
2062 			if (get_block) {
2063 				err = get_block(inode, block, bh, 1);
2064 				if (err)
2065 					break;
2066 			} else {
2067 				iomap_to_bh(inode, block, bh, iomap);
2068 			}
2069 
2070 			if (buffer_new(bh)) {
2071 				clean_bdev_bh_alias(bh);
2072 				if (folio_test_uptodate(folio)) {
2073 					clear_buffer_new(bh);
2074 					set_buffer_uptodate(bh);
2075 					mark_buffer_dirty(bh);
2076 					continue;
2077 				}
2078 				if (block_end > to || block_start < from)
2079 					folio_zero_segments(folio,
2080 						to, block_end,
2081 						block_start, from);
2082 				continue;
2083 			}
2084 		}
2085 		if (folio_test_uptodate(folio)) {
2086 			if (!buffer_uptodate(bh))
2087 				set_buffer_uptodate(bh);
2088 			continue;
2089 		}
2090 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2091 		    !buffer_unwritten(bh) &&
2092 		     (block_start < from || block_end > to)) {
2093 			bh_read_nowait(bh, 0);
2094 			*wait_bh++ = bh;
2095 		}
2096 	}
2097 	/*
2098 	 * If we issued read requests, let them complete.
2099 	 */
2100 	while (wait_bh > wait) {
2101 		wait_on_buffer(*--wait_bh);
2102 		if (!buffer_uptodate(*wait_bh))
2103 			err = -EIO;
2104 	}
2105 	if (unlikely(err))
2106 		page_zero_new_buffers(&folio->page, from, to);
2107 	return err;
2108 }
2109 
2110 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2111 		get_block_t *get_block)
2112 {
2113 	return __block_write_begin_int(page_folio(page), pos, len, get_block,
2114 				       NULL);
2115 }
2116 EXPORT_SYMBOL(__block_write_begin);
2117 
2118 static int __block_commit_write(struct inode *inode, struct page *page,
2119 		unsigned from, unsigned to)
2120 {
2121 	unsigned block_start, block_end;
2122 	int partial = 0;
2123 	unsigned blocksize;
2124 	struct buffer_head *bh, *head;
2125 
2126 	bh = head = page_buffers(page);
2127 	blocksize = bh->b_size;
2128 
2129 	block_start = 0;
2130 	do {
2131 		block_end = block_start + blocksize;
2132 		if (block_end <= from || block_start >= to) {
2133 			if (!buffer_uptodate(bh))
2134 				partial = 1;
2135 		} else {
2136 			set_buffer_uptodate(bh);
2137 			mark_buffer_dirty(bh);
2138 		}
2139 		if (buffer_new(bh))
2140 			clear_buffer_new(bh);
2141 
2142 		block_start = block_end;
2143 		bh = bh->b_this_page;
2144 	} while (bh != head);
2145 
2146 	/*
2147 	 * If this is a partial write which happened to make all buffers
2148 	 * uptodate then we can optimize away a bogus read_folio() for
2149 	 * the next read(). Here we 'discover' whether the page went
2150 	 * uptodate as a result of this (potentially partial) write.
2151 	 */
2152 	if (!partial)
2153 		SetPageUptodate(page);
2154 	return 0;
2155 }
2156 
2157 /*
2158  * block_write_begin takes care of the basic task of block allocation and
2159  * bringing partial write blocks uptodate first.
2160  *
2161  * The filesystem needs to handle block truncation upon failure.
2162  */
2163 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2164 		struct page **pagep, get_block_t *get_block)
2165 {
2166 	pgoff_t index = pos >> PAGE_SHIFT;
2167 	struct page *page;
2168 	int status;
2169 
2170 	page = grab_cache_page_write_begin(mapping, index);
2171 	if (!page)
2172 		return -ENOMEM;
2173 
2174 	status = __block_write_begin(page, pos, len, get_block);
2175 	if (unlikely(status)) {
2176 		unlock_page(page);
2177 		put_page(page);
2178 		page = NULL;
2179 	}
2180 
2181 	*pagep = page;
2182 	return status;
2183 }
2184 EXPORT_SYMBOL(block_write_begin);
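
/*
 * Illustrative sketch (hypothetical "myfs", not part of this file):
 * a filesystem's ->write_begin() is typically a thin wrapper around
 * block_write_begin(), plus the block-truncation cleanup on failure
 * that the comment above asks for.  myfs_get_block and
 * myfs_truncate_blocks are assumed helpers.
 */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{
	int ret = block_write_begin(mapping, pos, len, pagep, myfs_get_block);

	if (ret < 0)
		myfs_truncate_blocks(mapping->host, pos + len);
	return ret;
}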
2185 
2186 int block_write_end(struct file *file, struct address_space *mapping,
2187 			loff_t pos, unsigned len, unsigned copied,
2188 			struct page *page, void *fsdata)
2189 {
2190 	struct inode *inode = mapping->host;
2191 	unsigned start;
2192 
2193 	start = pos & (PAGE_SIZE - 1);
2194 
2195 	if (unlikely(copied < len)) {
2196 		/*
2197 		 * The buffers that were written will now be uptodate, so
2198 		 * we don't have to worry about a read_folio reading them
2199 		 * and overwriting a partial write. However if we have
2200 		 * encountered a short write and only partially written
2201 		 * into a buffer, it will not be marked uptodate, so a
2202 		 * read_folio might come in and destroy our partial write.
2203 		 *
2204 		 * Do the simplest thing, and just treat any short write to a
2205 		 * non uptodate page as a zero-length write, and force the
2206 		 * caller to redo the whole thing.
2207 		 */
2208 		if (!PageUptodate(page))
2209 			copied = 0;
2210 
2211 		page_zero_new_buffers(page, start+copied, start+len);
2212 	}
2213 	flush_dcache_page(page);
2214 
2215 	/* This could be a short (even 0-length) commit */
2216 	__block_commit_write(inode, page, start, start+copied);
2217 
2218 	return copied;
2219 }
2220 EXPORT_SYMBOL(block_write_end);
2221 
2222 int generic_write_end(struct file *file, struct address_space *mapping,
2223 			loff_t pos, unsigned len, unsigned copied,
2224 			struct page *page, void *fsdata)
2225 {
2226 	struct inode *inode = mapping->host;
2227 	loff_t old_size = inode->i_size;
2228 	bool i_size_changed = false;
2229 
2230 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2231 
2232 	/*
2233 	 * No need to use i_size_read() here: i_size cannot change under us
2234 	 * because we hold i_rwsem.
2235 	 *
2236 	 * But it's important to update i_size while still holding page lock:
2237 	 * page writeout could otherwise come in and zero beyond i_size.
2238 	 */
2239 	if (pos + copied > inode->i_size) {
2240 		i_size_write(inode, pos + copied);
2241 		i_size_changed = true;
2242 	}
2243 
2244 	unlock_page(page);
2245 	put_page(page);
2246 
2247 	if (old_size < pos)
2248 		pagecache_isize_extended(inode, old_size, pos);
2249 	/*
2250 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2251 	 * lengthens the time the page lock is held. Second, it forces a lock
2252 	 * ordering of page lock before transaction start for journaling
2253 	 * filesystems.
2254 	 */
2255 	if (i_size_changed)
2256 		mark_inode_dirty(inode);
2257 	return copied;
2258 }
2259 EXPORT_SYMBOL(generic_write_end);
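
/*
 * Illustrative sketch: a minimal buffer-head based
 * address_space_operations table wiring together the generic helpers
 * in this file.  The myfs_* entries refer to the hypothetical wrappers
 * sketched alongside their respective helpers in this section.
 */
static const struct address_space_operations myfs_aops = {
	.dirty_folio		= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
	.read_folio		= myfs_read_folio,
	.writepage		= myfs_writepage,
	.write_begin		= myfs_write_begin,
	.write_end		= generic_write_end,
	.bmap			= myfs_bmap,
	.is_partially_uptodate	= block_is_partially_uptodate,
};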
2260 
2261 /*
2262  * block_is_partially_uptodate checks whether buffers within a folio are
2263  * uptodate or not.
2264  *
2265  * Returns true if all buffers which correspond to the specified part
2266  * of the folio are uptodate.
2267  */
2268 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2269 {
2270 	unsigned block_start, block_end, blocksize;
2271 	unsigned to;
2272 	struct buffer_head *bh, *head;
2273 	bool ret = true;
2274 
2275 	head = folio_buffers(folio);
2276 	if (!head)
2277 		return false;
2278 	blocksize = head->b_size;
2279 	to = min_t(unsigned, folio_size(folio) - from, count);
2280 	to = from + to;
2281 	if (from < blocksize && to > folio_size(folio) - blocksize)
2282 		return false;
2283 
2284 	bh = head;
2285 	block_start = 0;
2286 	do {
2287 		block_end = block_start + blocksize;
2288 		if (block_end > from && block_start < to) {
2289 			if (!buffer_uptodate(bh)) {
2290 				ret = false;
2291 				break;
2292 			}
2293 			if (block_end >= to)
2294 				break;
2295 		}
2296 		block_start = block_end;
2297 		bh = bh->b_this_page;
2298 	} while (bh != head);
2299 
2300 	return ret;
2301 }
2302 EXPORT_SYMBOL(block_is_partially_uptodate);
2303 
2304 /*
2305  * Generic "read_folio" function for block devices that have the normal
2306  * get_block functionality. This covers most block-device-backed filesystems.
2307  * Reads the folio asynchronously --- the unlock_buffer() and
2308  * set/clear_buffer_uptodate() functions propagate buffer state into the
2309  * folio once IO has completed.
2310  */
2311 int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2312 {
2313 	struct inode *inode = folio->mapping->host;
2314 	sector_t iblock, lblock;
2315 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2316 	unsigned int blocksize, bbits;
2317 	int nr, i;
2318 	int fully_mapped = 1;
2319 	bool page_error = false;
2320 	loff_t limit = i_size_read(inode);
2321 
2322 	/* This is needed for ext4. */
2323 	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2324 		limit = inode->i_sb->s_maxbytes;
2325 
2326 	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2327 
2328 	head = folio_create_buffers(folio, inode, 0);
2329 	blocksize = head->b_size;
2330 	bbits = block_size_bits(blocksize);
2331 
2332 	iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
2333 	lblock = (limit+blocksize-1) >> bbits;
2334 	bh = head;
2335 	nr = 0;
2336 	i = 0;
2337 
2338 	do {
2339 		if (buffer_uptodate(bh))
2340 			continue;
2341 
2342 		if (!buffer_mapped(bh)) {
2343 			int err = 0;
2344 
2345 			fully_mapped = 0;
2346 			if (iblock < lblock) {
2347 				WARN_ON(bh->b_size != blocksize);
2348 				err = get_block(inode, iblock, bh, 0);
2349 				if (err) {
2350 					folio_set_error(folio);
2351 					page_error = true;
2352 				}
2353 			}
2354 			if (!buffer_mapped(bh)) {
2355 				folio_zero_range(folio, i * blocksize,
2356 						blocksize);
2357 				if (!err)
2358 					set_buffer_uptodate(bh);
2359 				continue;
2360 			}
2361 			/*
2362 			 * get_block() might have updated the buffer
2363 			 * synchronously
2364 			 */
2365 			if (buffer_uptodate(bh))
2366 				continue;
2367 		}
2368 		arr[nr++] = bh;
2369 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2370 
2371 	if (fully_mapped)
2372 		folio_set_mappedtodisk(folio);
2373 
2374 	if (!nr) {
2375 		/*
2376 		 * All buffers are uptodate - we can set the folio uptodate
2377 		 * as well. But not if get_block() returned an error.
2378 		 */
2379 		if (!page_error)
2380 			folio_mark_uptodate(folio);
2381 		folio_unlock(folio);
2382 		return 0;
2383 	}
2384 
2385 	/* Stage two: lock the buffers */
2386 	for (i = 0; i < nr; i++) {
2387 		bh = arr[i];
2388 		lock_buffer(bh);
2389 		mark_buffer_async_read(bh);
2390 	}
2391 
2392 	/*
2393 	 * Stage 3: start the IO.  Check for uptodateness
2394 	 * inside the buffer lock in case another process reading
2395 	 * the underlying blockdev brought it uptodate (the sct fix).
2396 	 */
2397 	for (i = 0; i < nr; i++) {
2398 		bh = arr[i];
2399 		if (buffer_uptodate(bh))
2400 			end_buffer_async_read(bh, 1);
2401 		else
2402 			submit_bh(REQ_OP_READ, bh);
2403 	}
2404 	return 0;
2405 }
2406 EXPORT_SYMBOL(block_read_full_folio);
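
/*
 * Illustrative sketch (hypothetical "myfs"): ->read_folio is usually a
 * one-line wrapper around block_read_full_folio().
 */
static int myfs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, myfs_get_block);
}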
2407 
2408 /*
2409  * Utility function for filesystems that need to do work on expanding
2410  * truncates.  Uses pagecache writes to let the filesystem deal with the hole.
2411  */
2412 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2413 {
2414 	struct address_space *mapping = inode->i_mapping;
2415 	const struct address_space_operations *aops = mapping->a_ops;
2416 	struct page *page;
2417 	void *fsdata = NULL;
2418 	int err;
2419 
2420 	err = inode_newsize_ok(inode, size);
2421 	if (err)
2422 		goto out;
2423 
2424 	err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
2425 	if (err)
2426 		goto out;
2427 
2428 	err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
2429 	BUG_ON(err > 0);
2430 
2431 out:
2432 	return err;
2433 }
2434 EXPORT_SYMBOL(generic_cont_expand_simple);
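
/*
 * Illustrative sketch: how an expanding truncate (e.g. in ->setattr)
 * typically uses generic_cont_expand_simple() and then dirties the
 * inode; "myfs" is hypothetical.
 */
static int myfs_cont_expand(struct inode *inode, loff_t size)
{
	int err = generic_cont_expand_simple(inode, size);

	if (!err)
		mark_inode_dirty(inode);
	return err;
}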
2435 
2436 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2437 			    loff_t pos, loff_t *bytes)
2438 {
2439 	struct inode *inode = mapping->host;
2440 	const struct address_space_operations *aops = mapping->a_ops;
2441 	unsigned int blocksize = i_blocksize(inode);
2442 	struct page *page;
2443 	void *fsdata = NULL;
2444 	pgoff_t index, curidx;
2445 	loff_t curpos;
2446 	unsigned zerofrom, offset, len;
2447 	int err = 0;
2448 
2449 	index = pos >> PAGE_SHIFT;
2450 	offset = pos & ~PAGE_MASK;
2451 
2452 	while (index > (curidx = (curpos = *bytes) >> PAGE_SHIFT)) {
2453 		zerofrom = curpos & ~PAGE_MASK;
2454 		if (zerofrom & (blocksize-1)) {
2455 			*bytes |= (blocksize-1);
2456 			(*bytes)++;
2457 		}
2458 		len = PAGE_SIZE - zerofrom;
2459 
2460 		err = aops->write_begin(file, mapping, curpos, len,
2461 					    &page, &fsdata);
2462 		if (err)
2463 			goto out;
2464 		zero_user(page, zerofrom, len);
2465 		err = aops->write_end(file, mapping, curpos, len, len,
2466 						page, fsdata);
2467 		if (err < 0)
2468 			goto out;
2469 		BUG_ON(err != len);
2470 		err = 0;
2471 
2472 		balance_dirty_pages_ratelimited(mapping);
2473 
2474 		if (fatal_signal_pending(current)) {
2475 			err = -EINTR;
2476 			goto out;
2477 		}
2478 	}
2479 
2480 	/* page covers the boundary, find the boundary offset */
2481 	if (index == curidx) {
2482 		zerofrom = curpos & ~PAGE_MASK;
2483 		/* if we are expanding the file, the last block will be filled */
2484 		if (offset <= zerofrom) {
2485 			goto out;
2486 		}
2487 		if (zerofrom & (blocksize-1)) {
2488 			*bytes |= (blocksize-1);
2489 			(*bytes)++;
2490 		}
2491 		len = offset - zerofrom;
2492 
2493 		err = aops->write_begin(file, mapping, curpos, len,
2494 					    &page, &fsdata);
2495 		if (err)
2496 			goto out;
2497 		zero_user(page, zerofrom, len);
2498 		err = aops->write_end(file, mapping, curpos, len, len,
2499 						page, fsdata);
2500 		if (err < 0)
2501 			goto out;
2502 		BUG_ON(err != len);
2503 		err = 0;
2504 	}
2505 out:
2506 	return err;
2507 }
2508 
2509 /*
2510  * For moronic filesystems that do not allow holes in files.
2511  * We may have to extend the file.
2512  */
2513 int cont_write_begin(struct file *file, struct address_space *mapping,
2514 			loff_t pos, unsigned len,
2515 			struct page **pagep, void **fsdata,
2516 			get_block_t *get_block, loff_t *bytes)
2517 {
2518 	struct inode *inode = mapping->host;
2519 	unsigned int blocksize = i_blocksize(inode);
2520 	unsigned int zerofrom;
2521 	int err;
2522 
2523 	err = cont_expand_zero(file, mapping, pos, bytes);
2524 	if (err)
2525 		return err;
2526 
2527 	zerofrom = *bytes & ~PAGE_MASK;
2528 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2529 		*bytes |= (blocksize-1);
2530 		(*bytes)++;
2531 	}
2532 
2533 	return block_write_begin(mapping, pos, len, pagep, get_block);
2534 }
2535 EXPORT_SYMBOL(cont_write_begin);
2536 
2537 int block_commit_write(struct page *page, unsigned from, unsigned to)
2538 {
2539 	struct inode *inode = page->mapping->host;
2540 	__block_commit_write(inode, page, from, to);
2541 	return 0;
2542 }
2543 EXPORT_SYMBOL(block_commit_write);
2544 
2545 /*
2546  * block_page_mkwrite() is not allowed to change the file size as it gets
2547  * called from a page fault handler when a page is first dirtied. Hence we must
2548  * be careful to check for EOF conditions here. We set the page up correctly
2549  * for a written page which means we get ENOSPC checking when writing into
2550  * holes and correct delalloc and unwritten extent mapping on filesystems that
2551  * support these features.
2552  *
2553  * We are not allowed to take i_rwsem here so we have to play games to
2554  * protect against truncate races as the page could now be beyond EOF.  Because
2555  * truncate writes the inode size before removing pages, once we have the
2556  * page lock we can determine safely if the page is beyond EOF. If it is not
2557  * beyond EOF, then the page is guaranteed safe against truncation until we
2558  * unlock the page.
2559  *
2560  * Direct callers of this function should protect against filesystem freezing
2561  * using sb_start_pagefault() - sb_end_pagefault() functions.
2562  */
2563 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2564 			 get_block_t get_block)
2565 {
2566 	struct page *page = vmf->page;
2567 	struct inode *inode = file_inode(vma->vm_file);
2568 	unsigned long end;
2569 	loff_t size;
2570 	int ret;
2571 
2572 	lock_page(page);
2573 	size = i_size_read(inode);
2574 	if ((page->mapping != inode->i_mapping) ||
2575 	    (page_offset(page) > size)) {
2576 		/* We overload EFAULT to mean page got truncated */
2577 		ret = -EFAULT;
2578 		goto out_unlock;
2579 	}
2580 
2581 	/* page is wholly or partially inside EOF */
2582 	if (((page->index + 1) << PAGE_SHIFT) > size)
2583 		end = size & ~PAGE_MASK;
2584 	else
2585 		end = PAGE_SIZE;
2586 
2587 	ret = __block_write_begin(page, 0, end, get_block);
2588 	if (!ret)
2589 		ret = block_commit_write(page, 0, end);
2590 
2591 	if (unlikely(ret < 0))
2592 		goto out_unlock;
2593 	set_page_dirty(page);
2594 	wait_for_stable_page(page);
2595 	return 0;
2596 out_unlock:
2597 	unlock_page(page);
2598 	return ret;
2599 }
2600 EXPORT_SYMBOL(block_page_mkwrite);
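
/*
 * Illustrative sketch (hypothetical "myfs"): a ->page_mkwrite handler
 * honouring the freeze-protection rule stated above.  Assumes
 * block_page_mkwrite_return() from <linux/buffer_head.h> to translate
 * the error into a vm_fault_t.
 */
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vmf->vma->vm_file)->i_sb;
	int err;

	sb_start_pagefault(sb);
	err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
	sb_end_pagefault(sb);
	return block_page_mkwrite_return(err);
}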
2601 
2602 int block_truncate_page(struct address_space *mapping,
2603 			loff_t from, get_block_t *get_block)
2604 {
2605 	pgoff_t index = from >> PAGE_SHIFT;
2606 	unsigned offset = from & (PAGE_SIZE-1);
2607 	unsigned blocksize;
2608 	sector_t iblock;
2609 	unsigned length, pos;
2610 	struct inode *inode = mapping->host;
2611 	struct page *page;
2612 	struct buffer_head *bh;
2613 	int err = 0;
2614 
2615 	blocksize = i_blocksize(inode);
2616 	length = offset & (blocksize - 1);
2617 
2618 	/* Block boundary? Nothing to do */
2619 	if (!length)
2620 		return 0;
2621 
2622 	length = blocksize - length;
2623 	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
2624 
2625 	page = grab_cache_page(mapping, index);
2626 	if (!page)
2627 		return -ENOMEM;
2628 
2629 	if (!page_has_buffers(page))
2630 		create_empty_buffers(page, blocksize, 0);
2631 
2632 	/* Find the buffer that contains "offset" */
2633 	bh = page_buffers(page);
2634 	pos = blocksize;
2635 	while (offset >= pos) {
2636 		bh = bh->b_this_page;
2637 		iblock++;
2638 		pos += blocksize;
2639 	}
2640 
2641 	if (!buffer_mapped(bh)) {
2642 		WARN_ON(bh->b_size != blocksize);
2643 		err = get_block(inode, iblock, bh, 0);
2644 		if (err)
2645 			goto unlock;
2646 		/* unmapped? It's a hole - nothing to do */
2647 		if (!buffer_mapped(bh))
2648 			goto unlock;
2649 	}
2650 
2651 	/* Ok, it's mapped. Make sure it's up-to-date */
2652 	if (PageUptodate(page))
2653 		set_buffer_uptodate(bh);
2654 
2655 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2656 		err = bh_read(bh, 0);
2657 		/* Uhhuh. Read error. Complain and punt. */
2658 		if (err < 0)
2659 			goto unlock;
2660 	}
2661 
2662 	zero_user(page, offset, length);
2663 	mark_buffer_dirty(bh);
2664 
2665 unlock:
2666 	unlock_page(page);
2667 	put_page(page);
2668 
2669 	return err;
2670 }
2671 EXPORT_SYMBOL(block_truncate_page);
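
/*
 * Illustrative sketch: a shrinking truncate typically calls
 * block_truncate_page() to zero the tail of the new, partial last
 * block so stale data is not exposed through mmap.
 */
static int myfs_truncate_tail(struct inode *inode)
{
	return block_truncate_page(inode->i_mapping, inode->i_size,
				   myfs_get_block);
}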
2672 
2673 /*
2674  * The generic ->writepage function for buffer-backed address_spaces
2675  */
2676 int block_write_full_page(struct page *page, get_block_t *get_block,
2677 			struct writeback_control *wbc)
2678 {
2679 	struct inode * const inode = page->mapping->host;
2680 	loff_t i_size = i_size_read(inode);
2681 	const pgoff_t end_index = i_size >> PAGE_SHIFT;
2682 	unsigned offset;
2683 
2684 	/* Is the page fully inside i_size? */
2685 	if (page->index < end_index)
2686 		return __block_write_full_page(inode, page, get_block, wbc,
2687 					       end_buffer_async_write);
2688 
2689 	/* Is the page fully outside i_size? (truncate in progress) */
2690 	offset = i_size & (PAGE_SIZE-1);
2691 	if (page->index >= end_index+1 || !offset) {
2692 		unlock_page(page);
2693 		return 0; /* don't care */
2694 	}
2695 
2696 	/*
2697 	 * The page straddles i_size.  It must be zeroed out on each and every
2698 	 * writepage invocation because it may be mmapped.  "A file is mapped
2699 	 * in multiples of the page size.  For a file that is not a multiple of
2700  * the page size, the remaining memory is zeroed when mapped, and
2701 	 * writes to that region are not written out to the file."
2702 	 */
2703 	zero_user_segment(page, offset, PAGE_SIZE);
2704 	return __block_write_full_page(inode, page, get_block, wbc,
2705 							end_buffer_async_write);
2706 }
2707 EXPORT_SYMBOL(block_write_full_page);
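
/*
 * Illustrative sketch (hypothetical "myfs"): ->writepage as a thin
 * wrapper around block_write_full_page().
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}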
2708 
2709 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2710 			    get_block_t *get_block)
2711 {
2712 	struct inode *inode = mapping->host;
2713 	struct buffer_head tmp = {
2714 		.b_size = i_blocksize(inode),
2715 	};
2716 
2717 	get_block(inode, block, &tmp, 0);
2718 	return tmp.b_blocknr;
2719 }
2720 EXPORT_SYMBOL(generic_block_bmap);
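
/*
 * Illustrative sketch: ->bmap via generic_block_bmap(), reusing the
 * same hypothetical get_block routine.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}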
2721 
2722 static void end_bio_bh_io_sync(struct bio *bio)
2723 {
2724 	struct buffer_head *bh = bio->bi_private;
2725 
2726 	if (unlikely(bio_flagged(bio, BIO_QUIET)))
2727 		set_bit(BH_Quiet, &bh->b_state);
2728 
2729 	bh->b_end_io(bh, !bio->bi_status);
2730 	bio_put(bio);
2731 }
2732 
2733 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2734 			  struct writeback_control *wbc)
2735 {
2736 	const enum req_op op = opf & REQ_OP_MASK;
2737 	struct bio *bio;
2738 
2739 	BUG_ON(!buffer_locked(bh));
2740 	BUG_ON(!buffer_mapped(bh));
2741 	BUG_ON(!bh->b_end_io);
2742 	BUG_ON(buffer_delay(bh));
2743 	BUG_ON(buffer_unwritten(bh));
2744 
2745 	/*
2746 	 * Only clear out a write error when rewriting
2747 	 */
2748 	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2749 		clear_buffer_write_io_error(bh);
2750 
2751 	if (buffer_meta(bh))
2752 		opf |= REQ_META;
2753 	if (buffer_prio(bh))
2754 		opf |= REQ_PRIO;
2755 
2756 	bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2757 
2758 	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2759 
2760 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2761 
2762 	__bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
2763 
2764 	bio->bi_end_io = end_bio_bh_io_sync;
2765 	bio->bi_private = bh;
2766 
2767 	/* Take care of bh's that straddle the end of the device */
2768 	guard_bio_eod(bio);
2769 
2770 	if (wbc) {
2771 		wbc_init_bio(wbc, bio);
2772 		wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2773 	}
2774 
2775 	submit_bio(bio);
2776 }
2777 
2778 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2779 {
2780 	submit_bh_wbc(opf, bh, NULL);
2781 }
2782 EXPORT_SYMBOL(submit_bh);
2783 
2784 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2785 {
2786 	lock_buffer(bh);
2787 	if (!test_clear_buffer_dirty(bh)) {
2788 		unlock_buffer(bh);
2789 		return;
2790 	}
2791 	bh->b_end_io = end_buffer_write_sync;
2792 	get_bh(bh);
2793 	submit_bh(REQ_OP_WRITE | op_flags, bh);
2794 }
2795 EXPORT_SYMBOL(write_dirty_buffer);
2796 
2797 /*
2798  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2799  * and then start new I/O and wait upon that too.  The caller must have a ref on
2800  * the buffer_head.
2801  */
2802 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2803 {
2804 	WARN_ON(atomic_read(&bh->b_count) < 1);
2805 	lock_buffer(bh);
2806 	if (test_clear_buffer_dirty(bh)) {
2807 		/*
2808 		 * The bh should be mapped, but it might not be if the
2809 		 * device was hot-removed. Not much we can do but fail the I/O.
2810 		 */
2811 		if (!buffer_mapped(bh)) {
2812 			unlock_buffer(bh);
2813 			return -EIO;
2814 		}
2815 
2816 		get_bh(bh);
2817 		bh->b_end_io = end_buffer_write_sync;
2818 		submit_bh(REQ_OP_WRITE | op_flags, bh);
2819 		wait_on_buffer(bh);
2820 		if (!buffer_uptodate(bh))
2821 			return -EIO;
2822 	} else {
2823 		unlock_buffer(bh);
2824 	}
2825 	return 0;
2826 }
2827 EXPORT_SYMBOL(__sync_dirty_buffer);
2828 
2829 int sync_dirty_buffer(struct buffer_head *bh)
2830 {
2831 	return __sync_dirty_buffer(bh, REQ_SYNC);
2832 }
2833 EXPORT_SYMBOL(sync_dirty_buffer);
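
/*
 * Illustrative sketch: the usual data-integrity pattern for metadata
 * held in a buffer_head (e.g. an on-disk superblock).  Assumes the
 * caller already holds a reference on the bh.
 */
static int myfs_sync_super(struct buffer_head *sb_bh)
{
	lock_buffer(sb_bh);
	/* ... update the on-disk structure via sb_bh->b_data ... */
	unlock_buffer(sb_bh);
	mark_buffer_dirty(sb_bh);
	return sync_dirty_buffer(sb_bh);	/* waits; -EIO on failure */
}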
2834 
2835 /*
2836  * try_to_free_buffers() checks if all the buffers on this particular folio
2837  * are unused, and releases them if so.
2838  *
2839  * Exclusion against try_to_free_buffers may be obtained either by
2840  * locking the folio or by holding its mapping's private_lock.
2841  *
2842  * If the folio is dirty but all the buffers are clean then we need to
2843  * be sure to mark the folio clean as well.  This is because the folio
2844  * may be against a block device, and a later reattachment of buffers
2845  * to a dirty folio will set *all* buffers dirty.  Which would corrupt
2846  * filesystem data on the same device.
2847  *
2848  * The same applies to regular filesystem folios: if all the buffers are
2849  * clean then we set the folio clean and proceed.  To do that, we require
2850  * total exclusion from block_dirty_folio().  That is obtained with
2851  * private_lock.
2852  *
2853  * try_to_free_buffers() is non-blocking.
2854  */
2855 static inline int buffer_busy(struct buffer_head *bh)
2856 {
2857 	return atomic_read(&bh->b_count) |
2858 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2859 }
2860 
2861 static bool
2862 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2863 {
2864 	struct buffer_head *head = folio_buffers(folio);
2865 	struct buffer_head *bh;
2866 
2867 	bh = head;
2868 	do {
2869 		if (buffer_busy(bh))
2870 			goto failed;
2871 		bh = bh->b_this_page;
2872 	} while (bh != head);
2873 
2874 	do {
2875 		struct buffer_head *next = bh->b_this_page;
2876 
2877 		if (bh->b_assoc_map)
2878 			__remove_assoc_queue(bh);
2879 		bh = next;
2880 	} while (bh != head);
2881 	*buffers_to_free = head;
2882 	folio_detach_private(folio);
2883 	return true;
2884 failed:
2885 	return false;
2886 }
2887 
2888 bool try_to_free_buffers(struct folio *folio)
2889 {
2890 	struct address_space * const mapping = folio->mapping;
2891 	struct buffer_head *buffers_to_free = NULL;
2892 	bool ret = false;
2893 
2894 	BUG_ON(!folio_test_locked(folio));
2895 	if (folio_test_writeback(folio))
2896 		return false;
2897 
2898 	if (mapping == NULL) {		/* can this still happen? */
2899 		ret = drop_buffers(folio, &buffers_to_free);
2900 		goto out;
2901 	}
2902 
2903 	spin_lock(&mapping->private_lock);
2904 	ret = drop_buffers(folio, &buffers_to_free);
2905 
2906 	/*
2907 	 * If the filesystem writes its buffers by hand (eg ext3)
2908 	 * then we can have clean buffers against a dirty folio.  We
2909 	 * clean the folio here; otherwise the VM will never notice
2910 	 * that the filesystem did any IO at all.
2911 	 *
2912 	 * Also, during truncate, discard_buffer will have marked all
2913 	 * the folio's buffers clean.  We discover that here and clean
2914 	 * the folio also.
2915 	 *
2916 	 * private_lock must be held over this entire operation in order
2917 	 * to synchronise against block_dirty_folio and prevent the
2918 	 * dirty bit from being lost.
2919 	 */
2920 	if (ret)
2921 		folio_cancel_dirty(folio);
2922 	spin_unlock(&mapping->private_lock);
2923 out:
2924 	if (buffers_to_free) {
2925 		struct buffer_head *bh = buffers_to_free;
2926 
2927 		do {
2928 			struct buffer_head *next = bh->b_this_page;
2929 			free_buffer_head(bh);
2930 			bh = next;
2931 		} while (bh != buffers_to_free);
2932 	}
2933 	return ret;
2934 }
2935 EXPORT_SYMBOL(try_to_free_buffers);
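
/*
 * Illustrative sketch: for buffer-head based filesystems,
 * ->release_folio is often just try_to_free_buffers() (the gfp mask is
 * not needed here, since try_to_free_buffers() never blocks).
 */
static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
	return try_to_free_buffers(folio);
}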
2936 
2937 /*
2938  * Buffer-head allocation
2939  */
2940 static struct kmem_cache *bh_cachep __read_mostly;
2941 
2942 /*
2943  * Once the number of bh's in the machine exceeds this level, we start
2944  * stripping them in writeback.
2945  */
2946 static unsigned long max_buffer_heads;
2947 
2948 int buffer_heads_over_limit;
2949 
2950 struct bh_accounting {
2951 	int nr;			/* Number of live bh's */
2952 	int ratelimit;		/* Limit cacheline bouncing */
2953 };
2954 
2955 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2956 
2957 static void recalc_bh_state(void)
2958 {
2959 	int i;
2960 	int tot = 0;
2961 
2962 	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
2963 		return;
2964 	__this_cpu_write(bh_accounting.ratelimit, 0);
2965 	for_each_online_cpu(i)
2966 		tot += per_cpu(bh_accounting, i).nr;
2967 	buffer_heads_over_limit = (tot > max_buffer_heads);
2968 }
2969 
2970 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
2971 {
2972 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
2973 	if (ret) {
2974 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
2975 		spin_lock_init(&ret->b_uptodate_lock);
2976 		preempt_disable();
2977 		__this_cpu_inc(bh_accounting.nr);
2978 		recalc_bh_state();
2979 		preempt_enable();
2980 	}
2981 	return ret;
2982 }
2983 EXPORT_SYMBOL(alloc_buffer_head);
2984 
2985 void free_buffer_head(struct buffer_head *bh)
2986 {
2987 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
2988 	kmem_cache_free(bh_cachep, bh);
2989 	preempt_disable();
2990 	__this_cpu_dec(bh_accounting.nr);
2991 	recalc_bh_state();
2992 	preempt_enable();
2993 }
2994 EXPORT_SYMBOL(free_buffer_head);
2995 
2996 static int buffer_exit_cpu_dead(unsigned int cpu)
2997 {
2998 	int i;
2999 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3000 
3001 	for (i = 0; i < BH_LRU_SIZE; i++) {
3002 		brelse(b->bhs[i]);
3003 		b->bhs[i] = NULL;
3004 	}
3005 	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3006 	per_cpu(bh_accounting, cpu).nr = 0;
3007 	return 0;
3008 }
3009 
3010 /**
3011  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3012  * @bh: struct buffer_head
3013  *
3014  * Return true if the buffer is up-to-date; otherwise return false
3015  * with the buffer locked.
3016  */
3017 int bh_uptodate_or_lock(struct buffer_head *bh)
3018 {
3019 	if (!buffer_uptodate(bh)) {
3020 		lock_buffer(bh);
3021 		if (!buffer_uptodate(bh))
3022 			return 0;
3023 		unlock_buffer(bh);
3024 	}
3025 	return 1;
3026 }
3027 EXPORT_SYMBOL(bh_uptodate_or_lock);
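
/*
 * Illustrative sketch: the canonical pattern built on
 * bh_uptodate_or_lock(); a simplified version of what bh_read() in
 * <linux/buffer_head.h> does.
 */
static int myfs_read_bh_sync(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, nothing to do */
	/* bh is now locked; submit the read and wait for completion */
	return __bh_read(bh, 0, true);
}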
3028 
3029 /**
3030  * __bh_read - Submit read for a locked buffer
3031  * @bh: struct buffer_head
3032  * @op_flags: additional REQ_OP_* flags to apply besides REQ_OP_READ
3033  * @wait: wait until the read finishes
3034  *
3035  * Returns zero on success (or when not waiting), and -EIO on error.
3036  */
3037 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3038 {
3039 	int ret = 0;
3040 
3041 	BUG_ON(!buffer_locked(bh));
3042 
3043 	get_bh(bh);
3044 	bh->b_end_io = end_buffer_read_sync;
3045 	submit_bh(REQ_OP_READ | op_flags, bh);
3046 	if (wait) {
3047 		wait_on_buffer(bh);
3048 		if (!buffer_uptodate(bh))
3049 			ret = -EIO;
3050 	}
3051 	return ret;
3052 }
3053 EXPORT_SYMBOL(__bh_read);
3054 
3055 /**
3056  * __bh_read_batch - Submit read for a batch of unlocked buffers
3057  * @nr: number of entries in the buffer batch
3058  * @bhs: a batch of struct buffer_head
3059  * @op_flags: additional REQ_OP_* flags to apply besides REQ_OP_READ
3060  * @force_lock: if set, wait for the buffer lock; otherwise skip any
3061  *              buffer that cannot be locked immediately.
3062  *
3063  * The reads are submitted asynchronously; completion is signalled per
3064  * buffer through end_buffer_read_sync().
3064  */
3065 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3066 		     blk_opf_t op_flags, bool force_lock)
3067 {
3068 	int i;
3069 
3070 	for (i = 0; i < nr; i++) {
3071 		struct buffer_head *bh = bhs[i];
3072 
3073 		if (buffer_uptodate(bh))
3074 			continue;
3075 
3076 		if (force_lock)
3077 			lock_buffer(bh);
3078 		else
3079 			if (!trylock_buffer(bh))
3080 				continue;
3081 
3082 		if (buffer_uptodate(bh)) {
3083 			unlock_buffer(bh);
3084 			continue;
3085 		}
3086 
3087 		bh->b_end_io = end_buffer_read_sync;
3088 		get_bh(bh);
3089 		submit_bh(REQ_OP_READ | op_flags, bh);
3090 	}
3091 }
3092 EXPORT_SYMBOL(__bh_read_batch);
3093 
3094 void __init buffer_init(void)
3095 {
3096 	unsigned long nrpages;
3097 	int ret;
3098 
3099 	bh_cachep = kmem_cache_create("buffer_head",
3100 			sizeof(struct buffer_head), 0,
3101 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3102 				SLAB_MEM_SPREAD),
3103 				NULL);
3104 
3105 	/*
3106 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3107 	 */
3108 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3109 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3110 	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3111 					NULL, buffer_exit_cpu_dead);
3112 	WARN_ON(ret < 0);
3113 }
3114