// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#include <linux/sched/isolation.h>

#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			  struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	folio_mark_accessed(bh->b_folio);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the folio has dirty or writeback buffers. If all the buffers
 * are unlocked and clean then the folio_test_dirty information is stale. If
 * any of the buffers are locked, it is assumed they are locked for IO.
 */
void buffer_check_dirty_writeback(struct folio *folio,
				     bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (!head)
		return;

	if (folio_test_writeback(folio))
		*writeback = true;

	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
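
/*
 * Example (not part of the original file): a minimal, hypothetical sketch of
 * how a caller might use wait_on_buffer().  Because the buffer can be locked
 * again at any time after it comes unlocked, the uptodate check below is
 * only a snapshot; the caller holds a reference (get_bh()) so the bh cannot
 * go away while it sleeps.
 */
static int example_wait_for_buffer(struct buffer_head *bh)
{
	int err;

	get_bh(bh);			/* keep the bh alive while we sleep */
	wait_on_buffer(bh);		/* block until in-flight I/O ends */
	err = buffer_uptodate(bh) ? 0 : -EIO;
	put_bh(bh);
	return err;
}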

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %pg, logical block %llu%s\n",
			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed read-ahead attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);
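
/*
 * Example (not part of the original file): a hedged sketch of the classic
 * synchronous-write pattern that end_buffer_write_sync() serves.  It mirrors
 * what sync_dirty_buffer() does, and a real caller would normally just use
 * that helper.  "example_write_buffer_sync" is a hypothetical name.
 */
static int example_write_buffer_sync(struct buffer_head *bh)
{
	lock_buffer(bh);		/* exclude other I/O on this bh */
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);		/* dropped by end_buffer_write_sync() */
		bh->b_end_io = end_buffer_write_sync;
		submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
		wait_on_buffer(bh);
		return buffer_uptodate(bh) ? 0 : -EIO;
	}
	unlock_buffer(bh);		/* clean buffer: nothing to write */
	return 0;
}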

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, private_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock.
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct folio *folio;
	int all_mapped = 1;
	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);

	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		goto out;

	spin_lock(&bd_mapping->private_lock);
	head = folio_buffers(folio);
	if (!head)
		goto out_unlock;
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/* We might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file I/O on the block device and getblk.  It gets dealt with
	 * elsewhere; don't complain if we had some unmapped buffers.
	 */
	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
	if (all_mapped && __ratelimit(&last_warned)) {
		printk("__find_get_block_slow() failed. block=%llu, "
		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
		       "device %pg blocksize: %d\n",
		       (unsigned long long)block,
		       (unsigned long long)bh->b_blocknr,
		       bh->b_state, bh->b_size, bdev,
		       1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	folio_put(folio);
out:
	return ret;
}

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;
	int folio_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
		folio_set_error(folio);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			folio_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);

	/*
	 * If all of the buffers are uptodate then we can set the page
	 * uptodate.
	 */
	if (folio_uptodate)
		folio_mark_uptodate(folio);
	folio_unlock(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

struct postprocess_bh_ctx {
	struct work_struct work;
	struct buffer_head *bh;
};

static void verify_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	bool valid;

	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
	end_buffer_async_read(bh, valid);
	kfree(ctx);
}

static bool need_fsverity(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	struct inode *inode = folio->mapping->host;

	return fsverity_active(inode) &&
		/* needed by ext4 */
		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void decrypt_bh(struct work_struct *work)
{
	struct postprocess_bh_ctx *ctx =
		container_of(work, struct postprocess_bh_ctx, work);
	struct buffer_head *bh = ctx->bh;
	int err;

	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
					       bh_offset(bh));
	if (err == 0 && need_fsverity(bh)) {
		/*
		 * We use different work queues for decryption and for verity
		 * because verity may require reading metadata pages that need
		 * decryption, and we shouldn't recurse to the same workqueue.
		 */
		INIT_WORK(&ctx->work, verify_bh);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}
	end_buffer_async_read(bh, err == 0);
	kfree(ctx);
}

/*
 * I/O completion handler for block_read_full_folio() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
{
	struct inode *inode = bh->b_folio->mapping->host;
	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
	bool verify = need_fsverity(bh);

	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
	if (uptodate && (decrypt || verify)) {
		struct postprocess_bh_ctx *ctx =
			kmalloc(sizeof(*ctx), GFP_ATOMIC);

		if (ctx) {
			ctx->bh = bh;
			if (decrypt) {
				INIT_WORK(&ctx->work, decrypt_bh);
				fscrypt_enqueue_decrypt_work(&ctx->work);
			} else {
				INIT_WORK(&ctx->work, verify_bh);
				fsverity_enqueue_verify_work(&ctx->work);
			}
			return;
		}
		uptodate = 0;
	}
	end_buffer_async_read(bh, uptodate);
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct folio *folio;

	BUG_ON(!buffer_async_write(bh));

	folio = bh->b_folio;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		folio_set_error(folio);
	}

	first = folio_buffers(folio);
	spin_lock_irqsave(&first->b_uptodate_lock, flags);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	folio_end_writeback(folio);
	return;

still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}
EXPORT_SYMBOL(end_buffer_async_write);

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O against any of
 * the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read_io;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC I/O.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
 * as you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

void emergency_thaw_bdev(struct super_block *sb)
{
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
		printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);

/**
 * generic_buffers_fsync_noflush - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure.
 */
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;
	int ret;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY_ALL))
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (ret == 0)
		ret = err;

out:
	/* check and advance again to catch errors after syncing out buffers */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync_noflush);

/**
 * generic_buffers_fsync - generic buffer fsync implementation
 * for simple filesystems with no inode lock
 *
 * @file:	file to synchronize
 * @start:	start offset in bytes
 * @end:	end offset in bytes (inclusive)
 * @datasync:	only synchronize essential metadata if true
 *
 * This is a generic implementation of the fsync method for simple
 * filesystems which track all non-inode metadata in the buffers list
 * hanging off the address_space structure. This also makes sure that
 * a device cache flush operation is called at the end.
 */
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
	return ret;
}
EXPORT_SYMBOL(generic_buffers_fsync);
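
/*
 * Example (not part of the original file): how a simple buffer-backed
 * filesystem might wire generic_buffers_fsync() into its file_operations.
 * "examplefs" and both identifiers below are hypothetical names.
 */
static int examplefs_fsync(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	return generic_buffers_fsync(file, start, end, datasync);
}

static const struct file_operations examplefs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.fsync		= examplefs_fsync,
};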

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			write_dirty_buffer(bh, 0);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_folio->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->private_data) {
		mapping->private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
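
/*
 * Example (not part of the original file): the ext2-style pattern that
 * mark_buffer_dirty_inode() supports.  A hypothetical filesystem dirties an
 * indirect block that must reach disk before fsync() of the file returns;
 * filing it on the inode's ->private_list lets sync_mapping_buffers() later
 * write and wait on it.  Both function names below are hypothetical.
 */
static void examplefs_dirty_indirect(struct buffer_head *bh,
				     struct inode *inode)
{
	/* Queue bh as an "associated" buffer of @inode for fsync(). */
	mark_buffer_dirty_inode(bh, inode);
}

static int examplefs_sync_metadata(struct inode *inode)
{
	/* Write out and wait upon every buffer queued above. */
	return sync_mapping_buffers(inode->i_mapping);
}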

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct buffer_head *head;
	bool newly_dirty;

	spin_lock(&mapping->private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	folio_memcg_lock(folio);
	newly_dirty = !folio_test_set_dirty(folio);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__folio_mark_dirty(folio, mapping, 1);

	folio_memcg_unlock(folio);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(block_dirty_folio);
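
/*
 * Example (not part of the original file): block_dirty_folio() is normally
 * used directly as the ->dirty_folio method of a buffer-backed
 * address_space, paired with block_invalidate_folio() from later in this
 * file.  "examplefs_aops" is a hypothetical name, and a real filesystem
 * would also supply ->read_folio, ->writepages and friends.
 */
static const struct address_space_operations examplefs_aops = {
	.dirty_folio		= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
};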

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;

	INIT_LIST_HEAD(&tmp);
	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, REQ_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a folio for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					bool retry)
{
	struct buffer_head *bh, *head;
	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
	long offset;
	struct mem_cgroup *memcg, *old_memcg;

	if (retry)
		gfp |= __GFP_NOFAIL;

	/* The folio lock pins the memcg */
	memcg = folio_memcg(folio);
	old_memcg = set_active_memcg(memcg);

	head = NULL;
	offset = folio_size(folio);
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(gfp);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its folio */
		folio_set_bh(bh, folio, offset);
	}
out:
	set_active_memcg(old_memcg);
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	goto out;
}
EXPORT_SYMBOL_GPL(folio_alloc_buffers);

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
				       bool retry)
{
	return folio_alloc_buffers(page_folio(page), size, retry);
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void link_dev_buffers(struct folio *folio,
		struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	folio_attach_private(folio, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = bdev_nr_bytes(bdev);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);
		retval = (sz >> sizebits);
	}
	return retval;
}

/*
 * Initialise the state of a blockdev folio's buffers.
 */
static sector_t folio_init_buffers(struct folio *folio,
		struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = head;
	bool uptodate = folio_test_uptodate(folio);
	sector_t end_block = blkdev_max_block(bdev, size);

	do {
		if (!buffer_mapped(bh)) {
			bh->b_end_io = NULL;
			bh->b_private = NULL;
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static int
grow_dev_page(struct block_device *bdev, sector_t block,
	      pgoff_t index, int size, int sizebits, gfp_t gfp)
{
	struct inode *inode = bdev->bd_inode;
	struct folio *folio;
	struct buffer_head *bh;
	sector_t end_block;
	int ret = 0;
	gfp_t gfp_mask;

	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;

	/*
	 * XXX: __getblk_slow() can not really deal with failure and
	 * will endlessly loop on improvised global reclaim.  Prefer
	 * looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp_mask |= __GFP_NOFAIL;

	folio = __filemap_get_folio(inode->i_mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp_mask);

	bh = folio_buffers(folio);
	if (bh) {
		if (bh->b_size == size) {
			end_block = folio_init_buffers(folio, bdev,
					(sector_t)index << sizebits, size);
			goto done;
		}
		if (!try_to_free_buffers(folio))
			goto failed;
	}

	bh = folio_alloc_buffers(folio, size, true);

	/*
	 * Link the folio to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the folio lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(folio, bh);
	end_block = folio_init_buffers(folio, bdev,
			(sector_t)index << sizebits, size);
	spin_unlock(&inode->i_mapping->private_lock);
done:
	ret = (block < end_block) ? 1 : -ENXIO;
failed:
	folio_unlock(folio);
	folio_put(folio);
	return ret;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
{
	pgoff_t index;
	int sizebits;

	sizebits = PAGE_SHIFT - __ffs(size);
	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %pg\n",
			__func__, (unsigned long long)block,
			bdev);
		return -EIO;
	}

	/* Create a page with buffers of the proper size. */
	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size, gfp);
		if (ret < 0)
			return NULL;
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in the page cache.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_folio() against that folio will discover all the uptodate
 * buffers, will set the folio uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
 * its backing page dirty, then tag the page as dirty in the page cache
 * and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->private_lock,
 * i_pages lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct folio *folio = bh->b_folio;
		struct address_space *mapping = NULL;

		folio_memcg_lock(folio);
		if (!folio_test_set_dirty(folio)) {
			mapping = folio->mapping;
			if (mapping)
				__folio_mark_dirty(folio, mapping, 0);
		}
		folio_memcg_unlock(folio);
		if (mapping)
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);
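
/*
 * Example (not part of the original file): a hedged sketch of initialising
 * a freshly allocated metadata block.  __getblk() may hand back a buffer
 * with stale contents, so the writer fills the whole block, marks it
 * uptodate, and only then calls mark_buffer_dirty() to queue it for
 * writeback.  "example_init_block" is a hypothetical name.
 */
static struct buffer_head *example_init_block(struct block_device *bdev,
					      sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (!bh)
		return NULL;
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);	/* overwrite stale contents */
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);			/* schedule for writeback */
	return bh;				/* caller must brelse() this */
}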

void mark_buffer_write_io_error(struct buffer_head *bh)
{
	struct super_block *sb;

	set_buffer_write_io_error(bh);
	/* FIXME: do we need to set this in both places? */
	if (bh->b_folio && bh->b_folio->mapping)
		mapping_set_error(bh->b_folio->mapping, -EIO);
	if (bh->b_assoc_map)
		mapping_set_error(bh->b_assoc_map, -EIO);
	rcu_read_lock();
	sb = READ_ONCE(bh->b_bdev->bd_super);
	if (sb)
		errseq_set(&sb->s_wb_err, -EIO);
	rcu_read_unlock();
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_folio->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	16

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
 * inserted at the front, and the buffer_head at the back, if any, is evicted.
 * Or, if already in the LRU it is moved to the front.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = bh;
	struct bh_lru *b;
	int i;

	check_irqs_on();
	bh_lru_lock();

	/*
	 * The refcount of a buffer_head in the bh_lru prevents dropping the
	 * attached page (i.e., try_to_free_buffers) and so can make page
	 * migration fail.  Skip putting the upcoming bh into the bh_lru
	 * until migration is done.
	 */
	if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return;
	}

	b = this_cpu_ptr(&bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		swap(evictee, b->bhs[i]);
		if (evictee == bh) {
			bh_lru_unlock();
			return;
		}
	}

	get_bh(bh);
	bh_lru_unlock();
	brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	if (cpu_is_isolated(smp_processor_id())) {
		bh_lru_unlock();
		return NULL;
	}
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
		    bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		/* __find_get_block_slow will mark the page accessed */
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	} else
		touch_buffer(bh);

	return bh;
}
EXPORT_SYMBOL(__find_get_block);
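
/*
 * Example (not part of the original file): because __find_get_block() only
 * looks up an existing buffer and never creates one, it suits callers that
 * must not block for memory.  A hypothetical probe:
 */
static bool example_block_is_cached(struct block_device *bdev,
				    sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	if (!bh)
		return false;
	brelse(bh);		/* drop the reference the lookup took */
	return true;
}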

/*
 * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk_gfp() will lock up the machine if grow_dev_page's
 * try_to_free_buffers() attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk_gfp(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size, gfp);
	return bh;
}
EXPORT_SYMBOL(__getblk_gfp);

/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		bh_readahead(bh, REQ_RAHEAD);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread_gfp() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *  @gfp: page allocation flag
 *
 *  Reads a specified block, and returns the buffer head that contains it.
 *  The page cache can be allocated from the non-movable area so as not to
 *  hinder page migration, if you set @gfp to zero.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread_gfp(struct block_device *bdev, sector_t block,
		   unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread_gfp);
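
/*
 * Example (not part of the original file): the usual metadata read pattern
 * built on __bread().  A NULL return means the block could not be read; on
 * success the caller owns one reference and must brelse() it.
 * "example_read_block" is a hypothetical name.
 */
static int example_read_block(struct block_device *bdev, sector_t block,
			      unsigned size, void *buf)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;
	memcpy(buf, bh->b_data, size);
	brelse(bh);
	return 0;
}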

static void __invalidate_bh_lrus(struct bh_lru *b)
{
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
}
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);

	__invalidate_bh_lrus(b);
	put_cpu_var(bh_lrus);
}

bool has_bh_in_lru(int cpu, void *dummy)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i])
			return true;
	}

	return false;
}

void invalidate_bh_lrus(void)
{
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

/*
 * It's called from workqueue context so we need a bh_lru_lock to close
 * the race with preemption/irq.
 */
void invalidate_bh_lrus_cpu(void)
{
	struct bh_lru *b;

	bh_lru_lock();
	b = this_cpu_ptr(&bh_lrus);
	__invalidate_bh_lrus(b);
	bh_lru_unlock();
}

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset)
{
	bh->b_folio = folio;
	BUG_ON(offset >= folio_size(folio));
	if (folio_test_highmem(folio))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = folio_address(folio) + offset;
}
EXPORT_SYMBOL(folio_set_bh);

/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
	 1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head * bh)
{
	unsigned long b_state;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	b_state = READ_ONCE(bh->b_state);
	do {
	} while (!try_cmpxchg(&bh->b_state, &b_state,
			      b_state & ~BUFFER_FLAGS_DISCARD));
	unlock_buffer(bh);
}

/**
 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * block_invalidate_folio() is called when all or part of the folio has been
 * invalidated by a truncate operation.
 *
 * block_invalidate_folio() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct buffer_head *head, *bh, *next;
	size_t curr_off = 0;
	size_t stop = length + offset;

	BUG_ON(!folio_test_locked(folio));

	/*
	 * Check for overflow
	 */
	BUG_ON(stop > folio_size(folio) || stop < length);

	head = folio_buffers(folio);
	if (!head)
		return;

	bh = head;
	do {
		size_t next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * Are we still fully in range?
		 */
		if (next_off > stop)
			goto out;

		/*
		 * Is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire folio is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (length == folio_size(folio))
		filemap_release_folio(folio, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidate_folio);

/*
 * We attach and possibly dirty the buffers atomically wrt
 * block_dirty_folio() via private_lock.  try_to_free_buffers
 * is already excluded via the folio lock.
 */
void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
				unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = folio_alloc_buffers(folio, blocksize, true);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&folio->mapping->private_lock);
	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
		bh = head;
		do {
			if (folio_test_dirty(folio))
				set_buffer_dirty(bh);
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	folio_attach_private(folio, head);
	spin_unlock(&folio->mapping->private_lock);
}
EXPORT_SYMBOL(folio_create_empty_buffers);

void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	folio_create_empty_buffers(page_folio(page), blocksize, b_state);
}
EXPORT_SYMBOL(create_empty_buffers);
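
/*
 * Example (not part of the original file): the common "make sure this folio
 * has buffers" step a buffer-backed filesystem performs before walking a
 * folio block by block; it is essentially what this file's own
 * folio_create_buffers() helper does.  The function name is hypothetical.
 */
static struct buffer_head *example_get_folio_buffers(struct folio *folio,
						     struct inode *inode)
{
	struct buffer_head *head = folio_buffers(folio);

	if (!head) {
		/* Attach clean, unmapped buffers of the inode's block size. */
		folio_create_empty_buffers(folio, 1 << inode->i_blkbits, 0);
		head = folio_buffers(folio);
	}
	return head;
}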

/**
 * clean_bdev_aliases: clean a range of buffers in block device
 * @bdev: Block device to clean buffers in
 * @block: Start of a range of blocks to clean
 * @len: Number of blocks to clean
 *
 * We are taking a range of blocks for data and we don't want writeback of any
 * buffer-cache aliases starting from the return of this function until the
 * moment when something explicitly marks the buffer dirty (hopefully that
 * will not happen until we free that block ;-) We don't even need to mark it
 * not-uptodate - nobody can expect anything from a newly allocated buffer
 * anyway. We used to use unmap_buffer() for such invalidation, but that was
 * wrong. We definitely don't want to mark the alias unmapped, for example - it
 * would confuse anyone who might pick it with bread() afterwards...
 *
 * Also, note that bforget() doesn't lock the buffer.  So there can be
 * writeout I/O going on against recently-freed buffers.  We don't wait on that
 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
 * need to.  That happens here.
 */
void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct folio_batch fbatch;
	pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
	pgoff_t end;
	int i, count;
	struct buffer_head *bh;
	struct buffer_head *head;

	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
	folio_batch_init(&fbatch);
	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
		count = folio_batch_count(&fbatch);
		for (i = 0; i < count; i++) {
			struct folio *folio = fbatch.folios[i];

			if (!folio_buffers(folio))
				continue;
			/*
			 * We use the folio lock instead of
			 * bd_mapping->private_lock to pin buffers here since
			 * we can afford to sleep and it scales better than a
			 * global spinlock.
			 */
			folio_lock(folio);
			/* Recheck when the folio is locked which pins bhs */
			head = folio_buffers(folio);
			if (!head)
				goto unlock_page;
			bh = head;
			do {
				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
					goto next;
				if (bh->b_blocknr >= block + len)
					break;
				clear_buffer_dirty(bh);
				wait_on_buffer(bh);
				clear_buffer_req(bh);
next:
				bh = bh->b_this_page;
			} while (bh != head);
unlock_page:
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
		/* End of range already reached? */
		if (index > end || !index)
			break;
	}
}
EXPORT_SYMBOL(clean_bdev_aliases);
1779 
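/*
 * Illustrative sketch (guarded out, not part of the build): one way a
 * filesystem's get_block callback might use clean_bdev_bh_alias() - the
 * single-block wrapper around clean_bdev_aliases() - right after allocating
 * a block.  The generic helpers later in this file do the equivalent for any
 * buffer handed back with BH_New set.  "myfs_alloc_block" is a hypothetical
 * allocator, shown only for the calling convention; the lookup path for
 * already-mapped blocks is elided.
 */
#if 0
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	sector_t phys;
	int err;

	err = myfs_alloc_block(inode, iblock, &phys);	/* hypothetical */
	if (err)
		return err;
	map_bh(bh_result, inode->i_sb, phys);
	set_buffer_new(bh_result);
	/* Kill any stale alias the blockdev pagecache holds for this block. */
	clean_bdev_bh_alias(bh_result);
	return 0;
}
#endif
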
1780 /*
1781  * Size is a power-of-two in the range 512..PAGE_SIZE,
1782  * and the case we care about most is PAGE_SIZE.
1783  *
1784  * So this *could* possibly be written with those
1785  * constraints in mind (relevant mostly if some
1786  * architecture has a slow bit-scan instruction)
1787  */
1788 static inline int block_size_bits(unsigned int blocksize)
1789 {
1790 	return ilog2(blocksize);
1791 }
1792 
1793 static struct buffer_head *folio_create_buffers(struct folio *folio,
1794 						struct inode *inode,
1795 						unsigned int b_state)
1796 {
1797 	BUG_ON(!folio_test_locked(folio));
1798 
1799 	if (!folio_buffers(folio))
1800 		folio_create_empty_buffers(folio,
1801 					   1 << READ_ONCE(inode->i_blkbits),
1802 					   b_state);
1803 	return folio_buffers(folio);
1804 }
1805 
1806 /*
1807  * NOTE! All mapped/uptodate combinations are valid:
1808  *
1809  *	Mapped	Uptodate	Meaning
1810  *
1811  *	No	No		"unknown" - must do get_block()
1812  *	No	Yes		"hole" - zero-filled
1813  *	Yes	No		"allocated" - allocated on disk, not read in
1814  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1815  *
1816  * "Dirty" is valid only with the last case (mapped+uptodate).
1817  */
1818 
1819 /*
1820  * While block_write_full_page is writing back the dirty buffers under
1821  * the page lock, whoever dirtied the buffers may decide to clean them
1822  * again at any time.  We handle that by only looking at the buffer
1823  * state inside lock_buffer().
1824  *
1825  * If block_write_full_page() is called for regular writeback
1826  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1827  * locked buffer.  This can only happen if someone has written the buffer
1828  * directly, with submit_bh().  At the address_space level PageWriteback
1829  * prevents this contention from occurring.
1830  *
1831  * If block_write_full_page() is called with wbc->sync_mode ==
1832  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1833  * causes the writes to be flagged as synchronous writes.
1834  */
1835 int __block_write_full_folio(struct inode *inode, struct folio *folio,
1836 			get_block_t *get_block, struct writeback_control *wbc,
1837 			bh_end_io_t *handler)
1838 {
1839 	int err;
1840 	sector_t block;
1841 	sector_t last_block;
1842 	struct buffer_head *bh, *head;
1843 	unsigned int blocksize, bbits;
1844 	int nr_underway = 0;
1845 	blk_opf_t write_flags = wbc_to_write_flags(wbc);
1846 
1847 	head = folio_create_buffers(folio, inode,
1848 				    (1 << BH_Dirty) | (1 << BH_Uptodate));
1849 
1850 	/*
1851 	 * Be very careful.  We have no exclusion from block_dirty_folio
1852 	 * here, and the (potentially unmapped) buffers may become dirty at
1853 	 * any time.  If a buffer becomes dirty here after we've inspected it
1854 	 * then we just miss that fact, and the folio stays dirty.
1855 	 *
1856 	 * Buffers outside i_size may be dirtied by block_dirty_folio;
1857 	 * handle that here by just cleaning them.
1858 	 */
1859 
1860 	bh = head;
1861 	blocksize = bh->b_size;
1862 	bbits = block_size_bits(blocksize);
1863 
1864 	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
1865 	last_block = (i_size_read(inode) - 1) >> bbits;
1866 
1867 	/*
1868 	 * Get all the dirty buffers mapped to disk addresses and
1869 	 * handle any aliases from the underlying blockdev's mapping.
1870 	 */
1871 	do {
1872 		if (block > last_block) {
1873 			/*
1874 			 * mapped buffers outside i_size will occur, because
1875 			 * this folio can be outside i_size when there is a
1876 			 * truncate in progress.
1877 			 */
1878 			/*
1879 			 * The buffer was zeroed by block_write_full_page()
1880 			 */
1881 			clear_buffer_dirty(bh);
1882 			set_buffer_uptodate(bh);
1883 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1884 			   buffer_dirty(bh)) {
1885 			WARN_ON(bh->b_size != blocksize);
1886 			err = get_block(inode, block, bh, 1);
1887 			if (err)
1888 				goto recover;
1889 			clear_buffer_delay(bh);
1890 			if (buffer_new(bh)) {
1891 				/* blockdev mappings never come here */
1892 				clear_buffer_new(bh);
1893 				clean_bdev_bh_alias(bh);
1894 			}
1895 		}
1896 		bh = bh->b_this_page;
1897 		block++;
1898 	} while (bh != head);
1899 
1900 	do {
1901 		if (!buffer_mapped(bh))
1902 			continue;
1903 		/*
1904 		 * If it's a fully non-blocking write attempt and we cannot
1905 		 * lock the buffer then redirty the folio.  Note that this can
1906 		 * potentially cause a busy-wait loop from writeback threads
1907 		 * and kswapd activity, but those code paths have their own
1908 		 * higher-level throttling.
1909 		 */
1910 		if (wbc->sync_mode != WB_SYNC_NONE) {
1911 			lock_buffer(bh);
1912 		} else if (!trylock_buffer(bh)) {
1913 			folio_redirty_for_writepage(wbc, folio);
1914 			continue;
1915 		}
1916 		if (test_clear_buffer_dirty(bh)) {
1917 			mark_buffer_async_write_endio(bh, handler);
1918 		} else {
1919 			unlock_buffer(bh);
1920 		}
1921 	} while ((bh = bh->b_this_page) != head);
1922 
1923 	/*
1924 	 * The folio and its buffers are protected by the writeback flag,
1925 	 * so we can drop the bh refcounts early.
1926 	 */
1927 	BUG_ON(folio_test_writeback(folio));
1928 	folio_start_writeback(folio);
1929 
1930 	do {
1931 		struct buffer_head *next = bh->b_this_page;
1932 		if (buffer_async_write(bh)) {
1933 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1934 			nr_underway++;
1935 		}
1936 		bh = next;
1937 	} while (bh != head);
1938 	folio_unlock(folio);
1939 
1940 	err = 0;
1941 done:
1942 	if (nr_underway == 0) {
1943 		/*
1944 		 * The folio was marked dirty, but the buffers were
1945 		 * clean.  Someone wrote them back by hand with
1946 		 * write_dirty_buffer/submit_bh.  A rare case.
1947 		 */
1948 		folio_end_writeback(folio);
1949 
1950 		/*
1951 		 * The folio and buffer_heads can be released at any time from
1952 		 * here on.
1953 		 */
1954 	}
1955 	return err;
1956 
1957 recover:
1958 	/*
1959 	 * ENOSPC, or some other error.  We may already have added some
1960 	 * blocks to the file, so we need to write these out to avoid
1961 	 * exposing stale data.
1962 	 * The folio is currently locked and not marked for writeback
1963 	 */
1964 	bh = head;
1965 	/* Recovery: lock and submit the mapped buffers */
1966 	do {
1967 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1968 		    !buffer_delay(bh)) {
1969 			lock_buffer(bh);
1970 			mark_buffer_async_write_endio(bh, handler);
1971 		} else {
1972 			/*
1973 			 * The buffer may have been set dirty during
1974 			 * attachment to a dirty folio.
1975 			 */
1976 			clear_buffer_dirty(bh);
1977 		}
1978 	} while ((bh = bh->b_this_page) != head);
1979 	folio_set_error(folio);
1980 	BUG_ON(folio_test_writeback(folio));
1981 	mapping_set_error(folio->mapping, err);
1982 	folio_start_writeback(folio);
1983 	do {
1984 		struct buffer_head *next = bh->b_this_page;
1985 		if (buffer_async_write(bh)) {
1986 			clear_buffer_dirty(bh);
1987 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1988 			nr_underway++;
1989 		}
1990 		bh = next;
1991 	} while (bh != head);
1992 	folio_unlock(folio);
1993 	goto done;
1994 }
1995 EXPORT_SYMBOL(__block_write_full_folio);
1996 
1997 /*
1998  * If a folio has any new buffers, zero them out here, and mark them uptodate
1999  * and dirty so they'll be written out (in order to prevent uninitialised
2000  * block data from leaking). And clear the new bit.
2001  */
2002 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
2003 {
2004 	size_t block_start, block_end;
2005 	struct buffer_head *head, *bh;
2006 
2007 	BUG_ON(!folio_test_locked(folio));
2008 	head = folio_buffers(folio);
2009 	if (!head)
2010 		return;
2011 
2012 	bh = head;
2013 	block_start = 0;
2014 	do {
2015 		block_end = block_start + bh->b_size;
2016 
2017 		if (buffer_new(bh)) {
2018 			if (block_end > from && block_start < to) {
2019 				if (!folio_test_uptodate(folio)) {
2020 					size_t start, xend;
2021 
2022 					start = max(from, block_start);
2023 					xend = min(to, block_end);
2024 
2025 					folio_zero_segment(folio, start, xend);
2026 					set_buffer_uptodate(bh);
2027 				}
2028 
2029 				clear_buffer_new(bh);
2030 				mark_buffer_dirty(bh);
2031 			}
2032 		}
2033 
2034 		block_start = block_end;
2035 		bh = bh->b_this_page;
2036 	} while (bh != head);
2037 }
2038 EXPORT_SYMBOL(folio_zero_new_buffers);
2039 
2040 static void
2041 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
2042 		const struct iomap *iomap)
2043 {
2044 	loff_t offset = block << inode->i_blkbits;
2045 
2046 	bh->b_bdev = iomap->bdev;
2047 
2048 	/*
2049 	 * The block number gives the offset in the file we need to map; the
2050 	 * iomap contains the offset at which the mapping starts. If the
2051 	 * mapping ends before the current block, do not map the buffer and
2052 	 * let the caller handle it.
2053 	 */
2054 	BUG_ON(offset >= iomap->offset + iomap->length);
2055 
2056 	switch (iomap->type) {
2057 	case IOMAP_HOLE:
2058 		/*
2059 		 * If the buffer is not up to date or beyond the current EOF,
2060 		 * we need to mark it as new to ensure sub-block zeroing is
2061 		 * executed if necessary.
2062 		 */
2063 		if (!buffer_uptodate(bh) ||
2064 		    (offset >= i_size_read(inode)))
2065 			set_buffer_new(bh);
2066 		break;
2067 	case IOMAP_DELALLOC:
2068 		if (!buffer_uptodate(bh) ||
2069 		    (offset >= i_size_read(inode)))
2070 			set_buffer_new(bh);
2071 		set_buffer_uptodate(bh);
2072 		set_buffer_mapped(bh);
2073 		set_buffer_delay(bh);
2074 		break;
2075 	case IOMAP_UNWRITTEN:
2076 		/*
2077 		 * For unwritten regions, we always need to ensure that regions
2078 		 * in the block we are not writing to are zeroed. Mark the
2079 		 * buffer as new to ensure this.
2080 		 */
2081 		set_buffer_new(bh);
2082 		set_buffer_unwritten(bh);
2083 		fallthrough;
2084 	case IOMAP_MAPPED:
2085 		if ((iomap->flags & IOMAP_F_NEW) ||
2086 		    offset >= i_size_read(inode))
2087 			set_buffer_new(bh);
2088 		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2089 				inode->i_blkbits;
2090 		set_buffer_mapped(bh);
2091 		break;
2092 	}
2093 }
2094 
2095 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2096 		get_block_t *get_block, const struct iomap *iomap)
2097 {
2098 	unsigned from = pos & (PAGE_SIZE - 1);
2099 	unsigned to = from + len;
2100 	struct inode *inode = folio->mapping->host;
2101 	unsigned block_start, block_end;
2102 	sector_t block;
2103 	int err = 0;
2104 	unsigned blocksize, bbits;
2105 	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
2106 
2107 	BUG_ON(!folio_test_locked(folio));
2108 	BUG_ON(from > PAGE_SIZE);
2109 	BUG_ON(to > PAGE_SIZE);
2110 	BUG_ON(from > to);
2111 
2112 	head = folio_create_buffers(folio, inode, 0);
2113 	blocksize = head->b_size;
2114 	bbits = block_size_bits(blocksize);
2115 
2116 	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
2117 
2118 	for (bh = head, block_start = 0; bh != head || !block_start;
2119 	     block++, block_start = block_end, bh = bh->b_this_page) {
2120 		block_end = block_start + blocksize;
2121 		if (block_end <= from || block_start >= to) {
2122 			if (folio_test_uptodate(folio)) {
2123 				if (!buffer_uptodate(bh))
2124 					set_buffer_uptodate(bh);
2125 			}
2126 			continue;
2127 		}
2128 		if (buffer_new(bh))
2129 			clear_buffer_new(bh);
2130 		if (!buffer_mapped(bh)) {
2131 			WARN_ON(bh->b_size != blocksize);
2132 			if (get_block) {
2133 				err = get_block(inode, block, bh, 1);
2134 				if (err)
2135 					break;
2136 			} else {
2137 				iomap_to_bh(inode, block, bh, iomap);
2138 			}
2139 
2140 			if (buffer_new(bh)) {
2141 				clean_bdev_bh_alias(bh);
2142 				if (folio_test_uptodate(folio)) {
2143 					clear_buffer_new(bh);
2144 					set_buffer_uptodate(bh);
2145 					mark_buffer_dirty(bh);
2146 					continue;
2147 				}
2148 				if (block_end > to || block_start < from)
2149 					folio_zero_segments(folio,
2150 						to, block_end,
2151 						block_start, from);
2152 				continue;
2153 			}
2154 		}
2155 		if (folio_test_uptodate(folio)) {
2156 			if (!buffer_uptodate(bh))
2157 				set_buffer_uptodate(bh);
2158 			continue;
2159 		}
2160 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2161 		    !buffer_unwritten(bh) &&
2162 		     (block_start < from || block_end > to)) {
2163 			bh_read_nowait(bh, 0);
2164 			*wait_bh++ = bh;
2165 		}
2166 	}
2167 	/*
2168 	 * If we issued read requests - let them complete.
2169 	 */
2170 	while (wait_bh > wait) {
2171 		wait_on_buffer(*--wait_bh);
2172 		if (!buffer_uptodate(*wait_bh))
2173 			err = -EIO;
2174 	}
2175 	if (unlikely(err))
2176 		folio_zero_new_buffers(folio, from, to);
2177 	return err;
2178 }
2179 
2180 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2181 		get_block_t *get_block)
2182 {
2183 	return __block_write_begin_int(page_folio(page), pos, len, get_block,
2184 				       NULL);
2185 }
2186 EXPORT_SYMBOL(__block_write_begin);
2187 
2188 static int __block_commit_write(struct inode *inode, struct folio *folio,
2189 		size_t from, size_t to)
2190 {
2191 	size_t block_start, block_end;
2192 	bool partial = false;
2193 	unsigned blocksize;
2194 	struct buffer_head *bh, *head;
2195 
2196 	bh = head = folio_buffers(folio);
2197 	blocksize = bh->b_size;
2198 
2199 	block_start = 0;
2200 	do {
2201 		block_end = block_start + blocksize;
2202 		if (block_end <= from || block_start >= to) {
2203 			if (!buffer_uptodate(bh))
2204 				partial = true;
2205 		} else {
2206 			set_buffer_uptodate(bh);
2207 			mark_buffer_dirty(bh);
2208 		}
2209 		if (buffer_new(bh))
2210 			clear_buffer_new(bh);
2211 
2212 		block_start = block_end;
2213 		bh = bh->b_this_page;
2214 	} while (bh != head);
2215 
2216 	/*
2217 	 * If this is a partial write which happened to make all buffers
2218 	 * uptodate then we can optimize away a bogus read_folio() for
2219 	 * the next read(). Here we 'discover' whether the folio went
2220 	 * uptodate as a result of this (potentially partial) write.
2221 	 */
2222 	if (!partial)
2223 		folio_mark_uptodate(folio);
2224 	return 0;
2225 }
2226 
2227 /*
2228  * block_write_begin takes care of the basic task of block allocation and
2229  * bringing partial write blocks uptodate first.
2230  *
2231  * The filesystem needs to handle block truncation upon failure.
2232  */
2233 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2234 		struct page **pagep, get_block_t *get_block)
2235 {
2236 	pgoff_t index = pos >> PAGE_SHIFT;
2237 	struct page *page;
2238 	int status;
2239 
2240 	page = grab_cache_page_write_begin(mapping, index);
2241 	if (!page)
2242 		return -ENOMEM;
2243 
2244 	status = __block_write_begin(page, pos, len, get_block);
2245 	if (unlikely(status)) {
2246 		unlock_page(page);
2247 		put_page(page);
2248 		page = NULL;
2249 	}
2250 
2251 	*pagep = page;
2252 	return status;
2253 }
2254 EXPORT_SYMBOL(block_write_begin);
2255 
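/*
 * Illustrative sketch (guarded out, not part of the build): a minimal
 * ->write_begin for a hypothetical "myfs" that simply defers to
 * block_write_begin() with its own get_block callback.  On failure the
 * filesystem also truncates any blocks allocated past i_size, as the
 * comment above requires; "myfs_write_failed" is a hypothetical cleanup
 * helper.
 */
#if 0
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct page **pagep, void **fsdata)
{
	int ret = block_write_begin(mapping, pos, len, pagep, myfs_get_block);

	if (unlikely(ret))
		myfs_write_failed(mapping, pos + len);	/* hypothetical */
	return ret;
}
#endif
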
2256 int block_write_end(struct file *file, struct address_space *mapping,
2257 			loff_t pos, unsigned len, unsigned copied,
2258 			struct page *page, void *fsdata)
2259 {
2260 	struct folio *folio = page_folio(page);
2261 	struct inode *inode = mapping->host;
2262 	size_t start = pos - folio_pos(folio);
2263 
2264 	if (unlikely(copied < len)) {
2265 		/*
2266 		 * The buffers that were written will now be uptodate, so
2267 		 * we don't have to worry about a read_folio reading them
2268 		 * and overwriting a partial write. However if we have
2269 		 * encountered a short write and only partially written
2270 		 * into a buffer, it will not be marked uptodate, so a
2271 		 * read_folio might come in and destroy our partial write.
2272 		 *
2273 		 * Do the simplest thing, and just treat any short write to a
2274 		 * non uptodate folio as a zero-length write, and force the
2275 		 * caller to redo the whole thing.
2276 		 */
2277 		if (!folio_test_uptodate(folio))
2278 			copied = 0;
2279 
2280 		folio_zero_new_buffers(folio, start+copied, start+len);
2281 	}
2282 	flush_dcache_folio(folio);
2283 
2284 	/* This could be a short (even 0-length) commit */
2285 	__block_commit_write(inode, folio, start, start + copied);
2286 
2287 	return copied;
2288 }
2289 EXPORT_SYMBOL(block_write_end);
2290 
2291 int generic_write_end(struct file *file, struct address_space *mapping,
2292 			loff_t pos, unsigned len, unsigned copied,
2293 			struct page *page, void *fsdata)
2294 {
2295 	struct inode *inode = mapping->host;
2296 	loff_t old_size = inode->i_size;
2297 	bool i_size_changed = false;
2298 
2299 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2300 
2301 	/*
2302 	 * No need to use i_size_read() here, the i_size cannot change under us
2303 	 * because we hold i_rwsem.
2304 	 *
2305 	 * But it's important to update i_size while still holding page lock:
2306 	 * page writeout could otherwise come in and zero beyond i_size.
2307 	 */
2308 	if (pos + copied > inode->i_size) {
2309 		i_size_write(inode, pos + copied);
2310 		i_size_changed = true;
2311 	}
2312 
2313 	unlock_page(page);
2314 	put_page(page);
2315 
2316 	if (old_size < pos)
2317 		pagecache_isize_extended(inode, old_size, pos);
2318 	/*
2319 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2320 	 * makes the holding time of page lock longer. Second, it forces lock
2321 	 * ordering of page lock and transaction start for journaling
2322 	 * filesystems.
2323 	 */
2324 	if (i_size_changed)
2325 		mark_inode_dirty(inode);
2326 	return copied;
2327 }
2328 EXPORT_SYMBOL(generic_write_end);
2329 
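/*
 * Illustrative sketch (guarded out, not part of the build): how the helpers
 * above are typically wired into a filesystem's address_space_operations for
 * a hypothetical "myfs".  myfs_write_begin and myfs_read_folio are sketched
 * near their respective helpers, and generic_write_end() is used directly as
 * the ->write_end hook.
 */
#if 0
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}

static const struct address_space_operations myfs_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= myfs_read_folio,
	.writepage	= myfs_writepage,
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
	.bmap		= myfs_bmap,
};
#endif
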
2330 /*
2331  * block_is_partially_uptodate checks whether buffers within a folio are
2332  * uptodate or not.
2333  *
2334  * Returns true if all buffers which correspond to the specified part
2335  * of the folio are uptodate.
2336  */
2337 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2338 {
2339 	unsigned block_start, block_end, blocksize;
2340 	unsigned to;
2341 	struct buffer_head *bh, *head;
2342 	bool ret = true;
2343 
2344 	head = folio_buffers(folio);
2345 	if (!head)
2346 		return false;
2347 	blocksize = head->b_size;
2348 	to = min_t(unsigned, folio_size(folio) - from, count);
2349 	to = from + to;
2350 	if (from < blocksize && to > folio_size(folio) - blocksize)
2351 		return false;
2352 
2353 	bh = head;
2354 	block_start = 0;
2355 	do {
2356 		block_end = block_start + blocksize;
2357 		if (block_end > from && block_start < to) {
2358 			if (!buffer_uptodate(bh)) {
2359 				ret = false;
2360 				break;
2361 			}
2362 			if (block_end >= to)
2363 				break;
2364 		}
2365 		block_start = block_end;
2366 		bh = bh->b_this_page;
2367 	} while (bh != head);
2368 
2369 	return ret;
2370 }
2371 EXPORT_SYMBOL(block_is_partially_uptodate);
2372 
2373 /*
2374  * Generic "read_folio" function for block devices that have the normal
2375  * get_block functionality. This is most of the block device filesystems.
2376  * Reads the folio asynchronously --- the unlock_buffer() and
2377  * set/clear_buffer_uptodate() functions propagate buffer state into the
2378  * folio once IO has completed.
2379  */
2380 int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2381 {
2382 	struct inode *inode = folio->mapping->host;
2383 	sector_t iblock, lblock;
2384 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2385 	unsigned int blocksize, bbits;
2386 	int nr, i;
2387 	int fully_mapped = 1;
2388 	bool page_error = false;
2389 	loff_t limit = i_size_read(inode);
2390 
2391 	/* This is needed for ext4. */
2392 	/* Needed for ext4: its fs-verity metadata lives past i_size. */
2393 		limit = inode->i_sb->s_maxbytes;
2394 
2395 	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2396 
2397 	head = folio_create_buffers(folio, inode, 0);
2398 	blocksize = head->b_size;
2399 	bbits = block_size_bits(blocksize);
2400 
2401 	iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
2402 	lblock = (limit+blocksize-1) >> bbits;
2403 	bh = head;
2404 	nr = 0;
2405 	i = 0;
2406 
2407 	do {
2408 		if (buffer_uptodate(bh))
2409 			continue;
2410 
2411 		if (!buffer_mapped(bh)) {
2412 			int err = 0;
2413 
2414 			fully_mapped = 0;
2415 			if (iblock < lblock) {
2416 				WARN_ON(bh->b_size != blocksize);
2417 				err = get_block(inode, iblock, bh, 0);
2418 				if (err) {
2419 					folio_set_error(folio);
2420 					page_error = true;
2421 				}
2422 			}
2423 			if (!buffer_mapped(bh)) {
2424 				folio_zero_range(folio, i * blocksize,
2425 						blocksize);
2426 				if (!err)
2427 					set_buffer_uptodate(bh);
2428 				continue;
2429 			}
2430 			/*
2431 			 * get_block() might have updated the buffer
2432 			 * synchronously
2433 			 */
2434 			if (buffer_uptodate(bh))
2435 				continue;
2436 		}
2437 		arr[nr++] = bh;
2438 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2439 
2440 	if (fully_mapped)
2441 		folio_set_mappedtodisk(folio);
2442 
2443 	if (!nr) {
2444 		/*
2445 		 * All buffers are uptodate - we can set the folio uptodate
2446 		 * as well. But not if get_block() returned an error.
2447 		 */
2448 		if (!page_error)
2449 			folio_mark_uptodate(folio);
2450 		folio_unlock(folio);
2451 		return 0;
2452 	}
2453 
2454 	/* Stage two: lock the buffers */
2455 	for (i = 0; i < nr; i++) {
2456 		bh = arr[i];
2457 		lock_buffer(bh);
2458 		mark_buffer_async_read(bh);
2459 	}
2460 
2461 	/*
2462 	 * Stage three: start the IO.  Check for uptodateness
2463 	 * inside the buffer lock in case another process reading
2464 	 * the underlying blockdev brought it uptodate (the sct fix).
2465 	 */
2466 	for (i = 0; i < nr; i++) {
2467 		bh = arr[i];
2468 		if (buffer_uptodate(bh))
2469 			end_buffer_async_read(bh, 1);
2470 		else
2471 			submit_bh(REQ_OP_READ, bh);
2472 	}
2473 	return 0;
2474 }
2475 EXPORT_SYMBOL(block_read_full_folio);
2476 
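/*
 * Illustrative sketch (guarded out): for most block-based filesystems the
 * ->read_folio hook is a one-liner around block_read_full_folio() with the
 * filesystem's get_block callback ("myfs_get_block" is hypothetical).
 */
#if 0
static int myfs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, myfs_get_block);
}
#endif
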
2477 /* utility function for filesystems that need to do work on expanding
2478  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2479  * deal with the hole.
2480  */
2481 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2482 {
2483 	struct address_space *mapping = inode->i_mapping;
2484 	const struct address_space_operations *aops = mapping->a_ops;
2485 	struct page *page;
2486 	void *fsdata = NULL;
2487 	int err;
2488 
2489 	err = inode_newsize_ok(inode, size);
2490 	if (err)
2491 		goto out;
2492 
2493 	err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
2494 	if (err)
2495 		goto out;
2496 
2497 	err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
2498 	BUG_ON(err > 0);
2499 
2500 out:
2501 	return err;
2502 }
2503 EXPORT_SYMBOL(generic_cont_expand_simple);
2504 
2505 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2506 			    loff_t pos, loff_t *bytes)
2507 {
2508 	struct inode *inode = mapping->host;
2509 	const struct address_space_operations *aops = mapping->a_ops;
2510 	unsigned int blocksize = i_blocksize(inode);
2511 	struct page *page;
2512 	void *fsdata = NULL;
2513 	pgoff_t index, curidx;
2514 	loff_t curpos;
2515 	unsigned zerofrom, offset, len;
2516 	int err = 0;
2517 
2518 	index = pos >> PAGE_SHIFT;
2519 	offset = pos & ~PAGE_MASK;
2520 
2521 	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2522 		zerofrom = curpos & ~PAGE_MASK;
2523 		if (zerofrom & (blocksize-1)) {
2524 			*bytes |= (blocksize-1);
2525 			(*bytes)++;
2526 		}
2527 		len = PAGE_SIZE - zerofrom;
2528 
2529 		err = aops->write_begin(file, mapping, curpos, len,
2530 					    &page, &fsdata);
2531 		if (err)
2532 			goto out;
2533 		zero_user(page, zerofrom, len);
2534 		err = aops->write_end(file, mapping, curpos, len, len,
2535 						page, fsdata);
2536 		if (err < 0)
2537 			goto out;
2538 		BUG_ON(err != len);
2539 		err = 0;
2540 
2541 		balance_dirty_pages_ratelimited(mapping);
2542 
2543 		if (fatal_signal_pending(current)) {
2544 			err = -EINTR;
2545 			goto out;
2546 		}
2547 	}
2548 
2549 	/* page covers the boundary, find the boundary offset */
2550 	if (index == curidx) {
2551 		zerofrom = curpos & ~PAGE_MASK;
2552 		/* if we expand the file, the last block will be filled */
2553 		if (offset <= zerofrom) {
2554 			goto out;
2555 		}
2556 		if (zerofrom & (blocksize-1)) {
2557 			*bytes |= (blocksize-1);
2558 			(*bytes)++;
2559 		}
2560 		len = offset - zerofrom;
2561 
2562 		err = aops->write_begin(file, mapping, curpos, len,
2563 					    &page, &fsdata);
2564 		if (err)
2565 			goto out;
2566 		zero_user(page, zerofrom, len);
2567 		err = aops->write_end(file, mapping, curpos, len, len,
2568 						page, fsdata);
2569 		if (err < 0)
2570 			goto out;
2571 		BUG_ON(err != len);
2572 		err = 0;
2573 	}
2574 out:
2575 	return err;
2576 }
2577 
2578 /*
2579  * For moronic filesystems that do not allow holes in files.
2580  * We may have to extend the file.
2581  */
2582 int cont_write_begin(struct file *file, struct address_space *mapping,
2583 			loff_t pos, unsigned len,
2584 			struct page **pagep, void **fsdata,
2585 			get_block_t *get_block, loff_t *bytes)
2586 {
2587 	struct inode *inode = mapping->host;
2588 	unsigned int blocksize = i_blocksize(inode);
2589 	unsigned int zerofrom;
2590 	int err;
2591 
2592 	err = cont_expand_zero(file, mapping, pos, bytes);
2593 	if (err)
2594 		return err;
2595 
2596 	zerofrom = *bytes & ~PAGE_MASK;
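	/*
	 * If the write ends past the current expansion point and that point
	 * is not block aligned, round *bytes up to the next block boundary:
	 * since the guard below ensures *bytes is unaligned, the
	 * "|= (blocksize-1)" followed by "++" is round_up(*bytes, blocksize)
	 * for the power-of-two block sizes used here.
	 */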
2597 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2598 		*bytes |= (blocksize-1);
2599 		(*bytes)++;
2600 	}
2601 
2602 	return block_write_begin(mapping, pos, len, pagep, get_block);
2603 }
2604 EXPORT_SYMBOL(cont_write_begin);
2605 
2606 int block_commit_write(struct page *page, unsigned from, unsigned to)
2607 {
2608 	struct folio *folio = page_folio(page);
2609 	struct inode *inode = folio->mapping->host;
2610 	__block_commit_write(inode, folio, from, to);
2611 	return 0;
2612 }
2613 EXPORT_SYMBOL(block_commit_write);
2614 
2615 /*
2616  * block_page_mkwrite() is not allowed to change the file size as it gets
2617  * called from a page fault handler when a page is first dirtied. Hence we must
2618  * be careful to check for EOF conditions here. We set the page up correctly
2619  * for a written page which means we get ENOSPC checking when writing into
2620  * holes and correct delalloc and unwritten extent mapping on filesystems that
2621  * support these features.
2622  *
2623  * We are not allowed to take the i_rwsem here so we have to play games to
2624  * protect against truncate races as the page could now be beyond EOF.  Because
2625  * truncate writes the inode size before removing pages, once we have the
2626  * page lock we can determine safely if the page is beyond EOF. If it is not
2627  * beyond EOF, then the page is guaranteed safe against truncation until we
2628  * unlock the page.
2629  *
2630  * Direct callers of this function should protect against filesystem freezing
2631  * using sb_start_pagefault() - sb_end_pagefault() functions.
2632  */
2633 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2634 			 get_block_t get_block)
2635 {
2636 	struct folio *folio = page_folio(vmf->page);
2637 	struct inode *inode = file_inode(vma->vm_file);
2638 	unsigned long end;
2639 	loff_t size;
2640 	int ret;
2641 
2642 	folio_lock(folio);
2643 	size = i_size_read(inode);
2644 	if ((folio->mapping != inode->i_mapping) ||
2645 	    (folio_pos(folio) >= size)) {
2646 		/* We overload EFAULT to mean page got truncated */
2647 		ret = -EFAULT;
2648 		goto out_unlock;
2649 	}
2650 
2651 	end = folio_size(folio);
2652 	/* folio is wholly or partially inside EOF */
2653 	if (folio_pos(folio) + end > size)
2654 		end = size - folio_pos(folio);
2655 
2656 	ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2657 	if (!ret)
2658 		ret = __block_commit_write(inode, folio, 0, end);
2659 
2660 	if (unlikely(ret < 0))
2661 		goto out_unlock;
2662 	folio_mark_dirty(folio);
2663 	folio_wait_stable(folio);
2664 	return 0;
2665 out_unlock:
2666 	folio_unlock(folio);
2667 	return ret;
2668 }
2669 EXPORT_SYMBOL(block_page_mkwrite);
2670 
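/*
 * Illustrative sketch (guarded out): a ->page_mkwrite handler built on
 * block_page_mkwrite(), with the sb_start_pagefault()/sb_end_pagefault()
 * freeze protection the comment above asks direct callers to provide.
 * block_page_mkwrite_return() converts the errno into a vm_fault_t;
 * "myfs_get_block" is hypothetical.
 */
#if 0
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vmf->vma->vm_file)->i_sb;
	int err;

	sb_start_pagefault(sb);
	err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
	sb_end_pagefault(sb);
	return block_page_mkwrite_return(err);
}
#endif
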
2671 int block_truncate_page(struct address_space *mapping,
2672 			loff_t from, get_block_t *get_block)
2673 {
2674 	pgoff_t index = from >> PAGE_SHIFT;
2675 	unsigned blocksize;
2676 	sector_t iblock;
2677 	size_t offset, length, pos;
2678 	struct inode *inode = mapping->host;
2679 	struct folio *folio;
2680 	struct buffer_head *bh;
2681 	int err = 0;
2682 
2683 	blocksize = i_blocksize(inode);
2684 	length = from & (blocksize - 1);
2685 
2686 	/* Block boundary? Nothing to do */
2687 	if (!length)
2688 		return 0;
2689 
2690 	length = blocksize - length;
2691 	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
2692 
2693 	folio = filemap_grab_folio(mapping, index);
2694 	if (IS_ERR(folio))
2695 		return PTR_ERR(folio);
2696 
2697 	bh = folio_buffers(folio);
2698 	if (!bh) {
2699 		folio_create_empty_buffers(folio, blocksize, 0);
2700 		bh = folio_buffers(folio);
2701 	}
2702 
2703 	/* Find the buffer that contains "offset" */
2704 	offset = offset_in_folio(folio, from);
2705 	pos = blocksize;
2706 	while (offset >= pos) {
2707 		bh = bh->b_this_page;
2708 		iblock++;
2709 		pos += blocksize;
2710 	}
2711 
2712 	if (!buffer_mapped(bh)) {
2713 		WARN_ON(bh->b_size != blocksize);
2714 		err = get_block(inode, iblock, bh, 0);
2715 		if (err)
2716 			goto unlock;
2717 		/* unmapped? It's a hole - nothing to do */
2718 		if (!buffer_mapped(bh))
2719 			goto unlock;
2720 	}
2721 
2722 	/* Ok, it's mapped. Make sure it's up-to-date */
2723 	if (folio_test_uptodate(folio))
2724 		set_buffer_uptodate(bh);
2725 
2726 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2727 		err = bh_read(bh, 0);
2728 		/* Uhhuh. Read error. Complain and punt. */
2729 		if (err < 0)
2730 			goto unlock;
2731 	}
2732 
2733 	folio_zero_range(folio, offset, length);
2734 	mark_buffer_dirty(bh);
2735 
2736 unlock:
2737 	folio_unlock(folio);
2738 	folio_put(folio);
2739 
2740 	return err;
2741 }
2742 EXPORT_SYMBOL(block_truncate_page);
2743 
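/*
 * Illustrative sketch (guarded out): a filesystem shrinking an inode
 * typically zeroes the tail of the last remaining block with
 * block_truncate_page() before updating i_size and freeing the blocks
 * beyond the new size.  "myfs_free_blocks_beyond" is a hypothetical helper.
 */
#if 0
static int myfs_truncate(struct inode *inode, loff_t newsize)
{
	int err = block_truncate_page(inode->i_mapping, newsize,
				      myfs_get_block);
	if (err)
		return err;
	truncate_setsize(inode, newsize);
	myfs_free_blocks_beyond(inode, newsize);	/* hypothetical */
	return 0;
}
#endif
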
2744 /*
2745  * The generic ->writepage function for buffer-backed address_spaces
2746  */
2747 int block_write_full_page(struct page *page, get_block_t *get_block,
2748 			struct writeback_control *wbc)
2749 {
2750 	struct folio *folio = page_folio(page);
2751 	struct inode * const inode = folio->mapping->host;
2752 	loff_t i_size = i_size_read(inode);
2753 
2754 	/* Is the folio fully inside i_size? */
2755 	if (folio_pos(folio) + folio_size(folio) <= i_size)
2756 		return __block_write_full_folio(inode, folio, get_block, wbc,
2757 					       end_buffer_async_write);
2758 
2759 	/* Is the folio fully outside i_size? (truncate in progress) */
2760 	if (folio_pos(folio) >= i_size) {
2761 		folio_unlock(folio);
2762 		return 0; /* don't care */
2763 	}
2764 
2765 	/*
2766 	 * The folio straddles i_size.  It must be zeroed out on each and every
2767 	 * writepage invocation because it may be mmapped.  "A file is mapped
2768 	 * in multiples of the page size.  For a file that is not a multiple of
2769 	 * the page size, the remaining memory is zeroed when mapped, and
2770 	 * writes to that region are not written out to the file."
2771 	 */
2772 	folio_zero_segment(folio, offset_in_folio(folio, i_size),
2773 			folio_size(folio));
2774 	return __block_write_full_folio(inode, folio, get_block, wbc,
2775 			end_buffer_async_write);
2776 }
2777 EXPORT_SYMBOL(block_write_full_page);
2778 
2779 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2780 			    get_block_t *get_block)
2781 {
2782 	struct inode *inode = mapping->host;
2783 	struct buffer_head tmp = {
2784 		.b_size = i_blocksize(inode),
2785 	};
2786 
2787 	get_block(inode, block, &tmp, 0);
2788 	return tmp.b_blocknr;
2789 }
2790 EXPORT_SYMBOL(generic_block_bmap);
2791 
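/*
 * Illustrative sketch (guarded out): the ->bmap hook for a hypothetical
 * filesystem is usually just this thin wrapper.
 */
#if 0
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif
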
2792 static void end_bio_bh_io_sync(struct bio *bio)
2793 {
2794 	struct buffer_head *bh = bio->bi_private;
2795 
2796 	if (unlikely(bio_flagged(bio, BIO_QUIET)))
2797 		set_bit(BH_Quiet, &bh->b_state);
2798 
2799 	bh->b_end_io(bh, !bio->bi_status);
2800 	bio_put(bio);
2801 }
2802 
2803 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2804 			  struct writeback_control *wbc)
2805 {
2806 	const enum req_op op = opf & REQ_OP_MASK;
2807 	struct bio *bio;
2808 
2809 	BUG_ON(!buffer_locked(bh));
2810 	BUG_ON(!buffer_mapped(bh));
2811 	BUG_ON(!bh->b_end_io);
2812 	BUG_ON(buffer_delay(bh));
2813 	BUG_ON(buffer_unwritten(bh));
2814 
2815 	/*
2816 	 * Only clear out a write error when rewriting
2817 	 */
2818 	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2819 		clear_buffer_write_io_error(bh);
2820 
2821 	if (buffer_meta(bh))
2822 		opf |= REQ_META;
2823 	if (buffer_prio(bh))
2824 		opf |= REQ_PRIO;
2825 
2826 	bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2827 
2828 	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2829 
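	/* b_blocknr counts blocks of b_size bytes; bi_sector is in 512-byte units */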
2830 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2831 
2832 	__bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
2833 
2834 	bio->bi_end_io = end_bio_bh_io_sync;
2835 	bio->bi_private = bh;
2836 
2837 	/* Take care of bh's that straddle the end of the device */
2838 	guard_bio_eod(bio);
2839 
2840 	if (wbc) {
2841 		wbc_init_bio(wbc, bio);
2842 		wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2843 	}
2844 
2845 	submit_bio(bio);
2846 }
2847 
2848 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2849 {
2850 	submit_bh_wbc(opf, bh, NULL);
2851 }
2852 EXPORT_SYMBOL(submit_bh);
2853 
2854 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2855 {
2856 	lock_buffer(bh);
2857 	if (!test_clear_buffer_dirty(bh)) {
2858 		unlock_buffer(bh);
2859 		return;
2860 	}
2861 	bh->b_end_io = end_buffer_write_sync;
2862 	get_bh(bh);
2863 	submit_bh(REQ_OP_WRITE | op_flags, bh);
2864 }
2865 EXPORT_SYMBOL(write_dirty_buffer);
2866 
2867 /*
2868  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2869  * and then start new I/O and then wait upon it.  The caller must have a ref on
2870  * the buffer_head.
2871  */
2872 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2873 {
2874 	WARN_ON(atomic_read(&bh->b_count) < 1);
2875 	lock_buffer(bh);
2876 	if (test_clear_buffer_dirty(bh)) {
2877 		/*
2878 		 * The bh should be mapped, but it might not be if the
2879 		 * device was hot-removed. Not much we can do but fail the I/O.
2880 		 */
2881 		if (!buffer_mapped(bh)) {
2882 			unlock_buffer(bh);
2883 			return -EIO;
2884 		}
2885 
2886 		get_bh(bh);
2887 		bh->b_end_io = end_buffer_write_sync;
2888 		submit_bh(REQ_OP_WRITE | op_flags, bh);
2889 		wait_on_buffer(bh);
2890 		if (!buffer_uptodate(bh))
2891 			return -EIO;
2892 	} else {
2893 		unlock_buffer(bh);
2894 	}
2895 	return 0;
2896 }
2897 EXPORT_SYMBOL(__sync_dirty_buffer);
2898 
2899 int sync_dirty_buffer(struct buffer_head *bh)
2900 {
2901 	return __sync_dirty_buffer(bh, REQ_SYNC);
2902 }
2903 EXPORT_SYMBOL(sync_dirty_buffer);
2904 
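/*
 * Illustrative sketch (guarded out): the classic synchronous metadata
 * update built on these helpers for a hypothetical "myfs" - read a block
 * through the buffer cache, modify it, then wait until it is on stable
 * storage before proceeding.
 */
#if 0
static int myfs_update_block(struct super_block *sb, sector_t block,
			     unsigned int offset, const void *data,
			     unsigned int len)
{
	struct buffer_head *bh = sb_bread(sb, block);
	int err;

	if (!bh)
		return -EIO;
	memcpy(bh->b_data + offset, data, len);
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* lock, write, wait; -EIO on failure */
	brelse(bh);
	return err;
}
#endif
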
2905 /*
2906  * try_to_free_buffers() checks if all the buffers on this particular folio
2907  * are unused, and releases them if so.
2908  *
2909  * Exclusion against try_to_free_buffers may be obtained by either
2910  * locking the folio or by holding its mapping's private_lock.
2911  *
2912  * If the folio is dirty but all the buffers are clean then we need to
2913  * be sure to mark the folio clean as well.  This is because the folio
2914  * may be against a block device, and a later reattachment of buffers
2915  * to a dirty folio will set *all* buffers dirty.  Which would corrupt
2916  * filesystem data on the same device.
2917  *
2918  * The same applies to regular filesystem folios: if all the buffers are
2919  * clean then we set the folio clean and proceed.  To do that, we require
2920  * total exclusion from block_dirty_folio().  That is obtained with
2921  * private_lock.
2922  *
2923  * try_to_free_buffers() is non-blocking.
2924  */
2925 static inline int buffer_busy(struct buffer_head *bh)
2926 {
2927 	return atomic_read(&bh->b_count) |
2928 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2929 }
2930 
2931 static bool
2932 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2933 {
2934 	struct buffer_head *head = folio_buffers(folio);
2935 	struct buffer_head *bh;
2936 
2937 	bh = head;
2938 	do {
2939 		if (buffer_busy(bh))
2940 			goto failed;
2941 		bh = bh->b_this_page;
2942 	} while (bh != head);
2943 
2944 	do {
2945 		struct buffer_head *next = bh->b_this_page;
2946 
2947 		if (bh->b_assoc_map)
2948 			__remove_assoc_queue(bh);
2949 		bh = next;
2950 	} while (bh != head);
2951 	*buffers_to_free = head;
2952 	folio_detach_private(folio);
2953 	return true;
2954 failed:
2955 	return false;
2956 }
2957 
2958 bool try_to_free_buffers(struct folio *folio)
2959 {
2960 	struct address_space * const mapping = folio->mapping;
2961 	struct buffer_head *buffers_to_free = NULL;
2962 	bool ret = false;
2963 
2964 	BUG_ON(!folio_test_locked(folio));
2965 	if (folio_test_writeback(folio))
2966 		return false;
2967 
2968 	if (mapping == NULL) {		/* can this still happen? */
2969 		ret = drop_buffers(folio, &buffers_to_free);
2970 		goto out;
2971 	}
2972 
2973 	spin_lock(&mapping->private_lock);
2974 	ret = drop_buffers(folio, &buffers_to_free);
2975 
2976 	/*
2977 	 * If the filesystem writes its buffers by hand (eg ext3)
2978 	 * then we can have clean buffers against a dirty folio.  We
2979 	 * clean the folio here; otherwise the VM will never notice
2980 	 * that the filesystem did any IO at all.
2981 	 *
2982 	 * Also, during truncate, discard_buffer will have marked all
2983 	 * the folio's buffers clean.  We discover that here and clean
2984 	 * the folio also.
2985 	 *
2986 	 * private_lock must be held over this entire operation in order
2987 	 * to synchronise against block_dirty_folio and prevent the
2988 	 * dirty bit from being lost.
2989 	 */
2990 	if (ret)
2991 		folio_cancel_dirty(folio);
2992 	spin_unlock(&mapping->private_lock);
2993 out:
2994 	if (buffers_to_free) {
2995 		struct buffer_head *bh = buffers_to_free;
2996 
2997 		do {
2998 			struct buffer_head *next = bh->b_this_page;
2999 			free_buffer_head(bh);
3000 			bh = next;
3001 		} while (bh != buffers_to_free);
3002 	}
3003 	return ret;
3004 }
3005 EXPORT_SYMBOL(try_to_free_buffers);
3006 
3007 /*
3008  * Buffer-head allocation
3009  */
3010 static struct kmem_cache *bh_cachep __read_mostly;
3011 
3012 /*
3013  * Once the number of bh's in the machine exceeds this level, we start
3014  * stripping them in writeback.
3015  */
3016 static unsigned long max_buffer_heads;
3017 
3018 int buffer_heads_over_limit;
3019 
3020 struct bh_accounting {
3021 	int nr;			/* Number of live bh's */
3022 	int ratelimit;		/* Limit cacheline bouncing */
3023 };
3024 
3025 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3026 
3027 static void recalc_bh_state(void)
3028 {
3029 	int i;
3030 	int tot = 0;
3031 
3032 	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3033 		return;
3034 	__this_cpu_write(bh_accounting.ratelimit, 0);
3035 	for_each_online_cpu(i)
3036 		tot += per_cpu(bh_accounting, i).nr;
3037 	buffer_heads_over_limit = (tot > max_buffer_heads);
3038 }
3039 
3040 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3041 {
3042 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3043 	if (ret) {
3044 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3045 		spin_lock_init(&ret->b_uptodate_lock);
3046 		preempt_disable();
3047 		__this_cpu_inc(bh_accounting.nr);
3048 		recalc_bh_state();
3049 		preempt_enable();
3050 	}
3051 	return ret;
3052 }
3053 EXPORT_SYMBOL(alloc_buffer_head);
3054 
3055 void free_buffer_head(struct buffer_head *bh)
3056 {
3057 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3058 	kmem_cache_free(bh_cachep, bh);
3059 	preempt_disable();
3060 	__this_cpu_dec(bh_accounting.nr);
3061 	recalc_bh_state();
3062 	preempt_enable();
3063 }
3064 EXPORT_SYMBOL(free_buffer_head);
3065 
3066 static int buffer_exit_cpu_dead(unsigned int cpu)
3067 {
3068 	int i;
3069 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3070 
3071 	for (i = 0; i < BH_LRU_SIZE; i++) {
3072 		brelse(b->bhs[i]);
3073 		b->bhs[i] = NULL;
3074 	}
3075 	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3076 	per_cpu(bh_accounting, cpu).nr = 0;
3077 	return 0;
3078 }
3079 
3080 /**
3081  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3082  * @bh: struct buffer_head
3083  *
3084  * Return true if the buffer is up-to-date; otherwise return false
3085  * with the buffer locked.
3086  */
3087 int bh_uptodate_or_lock(struct buffer_head *bh)
3088 {
3089 	if (!buffer_uptodate(bh)) {
3090 		lock_buffer(bh);
3091 		if (!buffer_uptodate(bh))
3092 			return 0;
3093 		unlock_buffer(bh);
3094 	}
3095 	return 1;
3096 }
3097 EXPORT_SYMBOL(bh_uptodate_or_lock);
3098 
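/*
 * Illustrative sketch (guarded out): the usual "read it if needed" pattern.
 * bh_uptodate_or_lock() returns with the buffer locked when a read is
 * required, which is exactly the state __bh_read() below expects; this is
 * essentially the pattern behind the bh_read() wrapper in buffer_head.h.
 */
#if 0
static int myfs_read_bh(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;
	/* not uptodate: bh is now locked, as __bh_read() requires */
	return __bh_read(bh, 0, true);	/* submits, waits; -EIO on error */
}
#endif
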
3099 /**
3100  * __bh_read - Submit read for a locked buffer
3101  * @bh: struct buffer_head
3102  * @op_flags: extra REQ_* flags to apply in addition to REQ_OP_READ
3103  * @wait: wait until the read finishes
3104  *
3105  * Returns zero on success, or when not waiting; -EIO if the read failed.
3106  */
3107 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3108 {
3109 	int ret = 0;
3110 
3111 	BUG_ON(!buffer_locked(bh));
3112 
3113 	get_bh(bh);
3114 	bh->b_end_io = end_buffer_read_sync;
3115 	submit_bh(REQ_OP_READ | op_flags, bh);
3116 	if (wait) {
3117 		wait_on_buffer(bh);
3118 		if (!buffer_uptodate(bh))
3119 			ret = -EIO;
3120 	}
3121 	return ret;
3122 }
3123 EXPORT_SYMBOL(__bh_read);
3124 
3125 /**
3126  * __bh_read_batch - Submit read for a batch of unlocked buffers
3127  * @nr: number of entries in the buffer batch
3128  * @bhs: a batch of struct buffer_head
3129  * @op_flags: extra REQ_* flags to apply in addition to REQ_OP_READ
3130  * @force_lock: if set, wait to lock each buffer; otherwise skip any
3131  *              buffer that cannot be locked immediately.
3132  *
3133  * Submits the reads asynchronously and does not wait for them to complete.
3134  */
3135 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3136 		     blk_opf_t op_flags, bool force_lock)
3137 {
3138 	int i;
3139 
3140 	for (i = 0; i < nr; i++) {
3141 		struct buffer_head *bh = bhs[i];
3142 
3143 		if (buffer_uptodate(bh))
3144 			continue;
3145 
3146 		if (force_lock)
3147 			lock_buffer(bh);
3148 		else
3149 			if (!trylock_buffer(bh))
3150 				continue;
3151 
3152 		if (buffer_uptodate(bh)) {
3153 			unlock_buffer(bh);
3154 			continue;
3155 		}
3156 
3157 		bh->b_end_io = end_buffer_read_sync;
3158 		get_bh(bh);
3159 		submit_bh(REQ_OP_READ | op_flags, bh);
3160 	}
3161 }
3162 EXPORT_SYMBOL(__bh_read_batch);
3163 
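/*
 * Illustrative sketch (guarded out): opportunistic readahead over a batch
 * of buffers.  With force_lock false, any buffer that is already locked
 * (likely under I/O) is simply skipped rather than waited on.
 */
#if 0
static void myfs_readahead_blocks(struct buffer_head *bhs[], int nr)
{
	/* Opportunistic: already-locked (in-flight) buffers are skipped. */
	__bh_read_batch(nr, bhs, REQ_RAHEAD, false);
}
#endif
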
3164 void __init buffer_init(void)
3165 {
3166 	unsigned long nrpages;
3167 	int ret;
3168 
3169 	bh_cachep = kmem_cache_create("buffer_head",
3170 			sizeof(struct buffer_head), 0,
3171 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3172 				SLAB_MEM_SPREAD),
3173 				NULL);
3174 
3175 	/*
3176 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3177 	 */
3178 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3179 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3180 	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3181 					NULL, buffer_exit_cpu_dead);
3182 	WARN_ON(ret < 0);
3183 }
3184